Dataset columns:
  code                     string   (length 87 – 55.2k)
  code_codestyle           int64    (0 – 349)
  style_context            string   (length 135 – 49.1k)
  style_context_codestyle  int64    (0 – 349)
  label                    int64    (0 – 1)
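As a quick orientation, the sketch below shows how rows with this schema could be loaded and inspected with the `datasets` library. The repository path "user/code-style-pairs" is a placeholder, not the actual dataset id (which is not stated here), and the comments only restate the column types and ranges listed above.

```python
# Minimal sketch: load one row and check the five columns listed above.
# "user/code-style-pairs" is a hypothetical path, not the real dataset id.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # placeholder repo

row = ds[0]
print(len(row["code"]))                # string, 87 to ~55.2k characters
print(row["code_codestyle"])           # int64, 0-349
print(len(row["style_context"]))       # string, 135 to ~49.1k characters
print(row["style_context_codestyle"])  # int64, 0-349
print(row["label"])                    # int64, 0 or 1
```

The sample rows that follow are reproduced verbatim: each consists of an obfuscated code string, a style-context string, and their integer style ids and binary label.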
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case : '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Optional[Any], _lowerCamelCase : str=3, _lowerCamelCase : Tuple=32, _lowerCamelCase : Union[str, Any]=3, _lowerCamelCase : Any=10, _lowerCamelCase : int=[10, 20, 30, 40], _lowerCamelCase : str=[1, 1, 2, 1], _lowerCamelCase : Any=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : List[str]="relu", _lowerCamelCase : str=3, _lowerCamelCase : Dict=None, ): '''simple docstring''' __A = parent __A = batch_size __A = image_size __A = num_channels __A = embeddings_size __A = hidden_sizes __A = depths __A = is_training __A = use_labels __A = hidden_act __A = num_labels __A = scope __A = len(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __A = None if self.use_labels: __A = ids_tensor([self.batch_size], self.num_labels ) __A = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : int, _lowerCamelCase : int, _lowerCamelCase : Dict ): '''simple docstring''' __A = TFRegNetModel(config=_lowerCamelCase ) __A = model(_lowerCamelCase, training=_lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Any ): '''simple docstring''' __A = self.num_labels __A = TFRegNetForImageClassification(_lowerCamelCase ) __A = model(_lowerCamelCase, labels=_lowerCamelCase, training=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = self.prepare_config_and_inputs() __A , __A , __A = config_and_inputs __A = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class snake_case ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Any = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () A_ : str = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) A_ : Any = False A_ : Optional[Any] = False A_ : List[str] = False A_ : Tuple = False A_ : List[str] = False def _SCREAMING_SNAKE_CASE ( self 
: Optional[int] ): '''simple docstring''' __A = TFRegNetModelTester(self ) __A = ConfigTester(self, config_class=_lowerCamelCase, has_text_modality=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0, reason='''TF does not support backprop for grouped convolutions on CPU.''', ) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A , __A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __A = model_class(_lowerCamelCase ) __A = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __A = [*signature.parameters.keys()] __A = ['''pixel_values'''] self.assertListEqual(arg_names[:1], _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(_lowerCamelCase : Optional[Any], _lowerCamelCase : int, _lowerCamelCase : List[str] ): __A = model_class(_lowerCamelCase ) __A = model(**self._prepare_for_class(_lowerCamelCase, _lowerCamelCase ), training=_lowerCamelCase ) __A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __A = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ), expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], ) __A , __A = self.model_tester.prepare_config_and_inputs_for_common() __A = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __A = layer_type __A = True check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __A = True check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A , __A = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(_lowerCamelCase : Dict, _lowerCamelCase : Tuple, _lowerCamelCase : str, _lowerCamelCase : int={} ): __A = model(_lowerCamelCase, return_dict=_lowerCamelCase, **_lowerCamelCase ) __A = model(_lowerCamelCase, return_dict=_lowerCamelCase, **_lowerCamelCase ).to_tuple() def recursive_check(_lowerCamelCase : List[Any], _lowerCamelCase : int ): if isinstance(_lowerCamelCase, (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase, _lowerCamelCase ): recursive_check(_lowerCamelCase, _lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(_lowerCamelCase, _lowerCamelCase ) ), msg=( '''Tuple and dict output are not equal. 
Difference:''' f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}' ), ) recursive_check(_lowerCamelCase, _lowerCamelCase ) for model_class in self.all_model_classes: __A = model_class(_lowerCamelCase ) __A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase ) __A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase ) check_equivalence(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) __A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase, return_labels=_lowerCamelCase ) __A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase, return_labels=_lowerCamelCase ) check_equivalence(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) __A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase ) __A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase ) check_equivalence(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, {'''output_hidden_states''': True} ) __A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase, return_labels=_lowerCamelCase ) __A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase, return_labels=_lowerCamelCase ) check_equivalence(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, {'''output_hidden_states''': True} ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A = TFRegNetModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def lowerCAmelCase ( ): """simple docstring""" __A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __A = self.default_image_processor __A = prepare_img() __A = image_processor(images=_lowerCamelCase, return_tensors='''tf''' ) # forward pass __A = model(**_lowerCamelCase, training=_lowerCamelCase ) # verify the logits __A = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape, _lowerCamelCase ) __A = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3], _lowerCamelCase, atol=1e-4 )
266
"""simple docstring""" from __future__ import annotations class snake_case : '''simple docstring''' def __init__( self : int, _lowerCamelCase : List[Any]=None ): '''simple docstring''' __A = data __A = None def __repr__( self : Union[str, Any] ): '''simple docstring''' __A = [] __A = self while temp: string_rep.append(f'{temp.data}' ) __A = temp.next return "->".join(_lowerCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not elements_list: raise Exception('''The Elements List is empty''' ) __A = __A = Node(elements_list[0] ) for i in range(1 , len(__UpperCamelCase ) ): __A = Node(elements_list[i] ) __A = current.next return head def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def lowerCAmelCase ( ): """simple docstring""" from doctest import testmod testmod() __A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print('''Linked List:''' ) print(__UpperCamelCase ) print('''Elements in Reverse:''' ) print_reverse(__UpperCamelCase ) if __name__ == "__main__": main()
266
1
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ), } ), ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[List[List[str]]], _lowerCamelCase : List[List[str]], _lowerCamelCase : int = 1, _lowerCamelCase : int = 4, ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCamelCase, hypotheses=_lowerCamelCase, min_len=_lowerCamelCase, max_len=_lowerCamelCase ) }
266
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : int = ["input_features", "attention_mask"] def __init__( self : Optional[Any], _lowerCamelCase : Union[str, Any]=80, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Any=80, _lowerCamelCase : List[str]=0.0, _lowerCamelCase : int=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]=True, **_lowerCamelCase : List[str], ): '''simple docstring''' super().__init__(feature_size=_lowerCamelCase, sampling_rate=_lowerCamelCase, padding_value=_lowerCamelCase, **_lowerCamelCase ) __A = num_mel_bins __A = do_ceptral_normalize __A = normalize_means __A = normalize_vars __A = True def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : np.ndarray, ): '''simple docstring''' __A = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __A = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 ) __A = ta_kaldi.fbank(_lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray, _lowerCamelCase : int, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : float = 0.0, ): '''simple docstring''' # make sure we normalize float32 arrays if normalize_means: __A = x[:input_length].mean(axis=0 ) __A = np.subtract(_lowerCamelCase, _lowerCamelCase ) if normalize_vars: __A = x[:input_length].std(axis=0 ) __A = np.divide(_lowerCamelCase, _lowerCamelCase ) if input_length < x.shape[0]: __A = padding_value # make sure array is in float32 __A = x.astype(np.floataa ) return x def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[np.ndarray], _lowerCamelCase : Optional[np.ndarray] = None ): '''simple docstring''' __A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_lowerCamelCase, _lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value ) for x, n in zip(_lowerCamelCase, _lowerCamelCase ) ] def __call__( self : Optional[Any], _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _lowerCamelCase : Union[bool, str, PaddingStrategy] = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : bool = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[bool] = None, **_lowerCamelCase : Optional[Any], ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __A = isinstance(_lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __A = is_batched_numpy or ( isinstance(_lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCamelCase, np.ndarray ): __A = np.asarray(_lowerCamelCase, dtype=np.floataa ) elif isinstance(_lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __A = [raw_speech] # extract fbank features __A = [self._extract_fbank_features(_lowerCamelCase ) for waveform in raw_speech] # convert into correct format for padding __A = BatchFeature({'''input_features''': features} ) __A = self.pad( _lowerCamelCase, padding=_lowerCamelCase, max_length=_lowerCamelCase, truncation=_lowerCamelCase, pad_to_multiple_of=_lowerCamelCase, return_attention_mask=_lowerCamelCase, **_lowerCamelCase, ) # make sure list is in array format __A = padded_inputs.get('''input_features''' ) if isinstance(input_features[0], _lowerCamelCase ): __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for feature in input_features] __A = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: __A = [np.asarray(_lowerCamelCase, dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __A = ( np.array(_lowerCamelCase, dtype=np.intaa ) if self._get_padding_strategies(_lowerCamelCase, max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __A = self.normalize( padded_inputs['''input_features'''], attention_mask=_lowerCamelCase ) if return_tensors is not None: __A = padded_inputs.convert_to_tensors(_lowerCamelCase ) return padded_inputs
266
1
"""simple docstring""" from __future__ import annotations lowercase_ = tuple[int, int, int] lowercase_ = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase lowercase_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' # -------------------------- default selection -------------------------- # rotors -------------------------- lowercase_ = 'EGZWVONAHDCLFQMSIPJBYUKXTR' lowercase_ = 'FOBHMDKEXQNRAULPGSJVTYICZW' lowercase_ = 'ZJXESIUQLHAVRMDOYGTNFWPBKC' # reflector -------------------------- lowercase_ = { 'A': 'N', 'N': 'A', 'B': 'O', 'O': 'B', 'C': 'P', 'P': 'C', 'D': 'Q', 'Q': 'D', 'E': 'R', 'R': 'E', 'F': 'S', 'S': 'F', 'G': 'T', 'T': 'G', 'H': 'U', 'U': 'H', 'I': 'V', 'V': 'I', 'J': 'W', 'W': 'J', 'K': 'X', 'X': 'K', 'L': 'Y', 'Y': 'L', 'M': 'Z', 'Z': 'M', } # -------------------------- extra rotors -------------------------- lowercase_ = 'RMDJXFUWGISLHVTCQNKYPBEZOA' lowercase_ = 'SGLCPQWZHKXAREONTFBVIYJUDM' lowercase_ = 'HVSICLTYKQUBXDWAJZOMFGPREN' lowercase_ = 'RZWQHFMVDBKICJLNTUXAGYPSOE' lowercase_ = 'LFKIJODBEGAMQPXVUHYSTCZRWN' lowercase_ = 'KOAEGVDHXPQZMLFTYWJNBRCIUS' def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" if (unique_rotsel := len(set(__UpperCamelCase ) )) < 3: __A = f'Please use 3 unique rotors (not {unique_rotsel})' raise Exception(__UpperCamelCase ) # Checks if rotor positions are valid __A , __A , __A = rotpos if not 0 < rotorposa <= len(__UpperCamelCase ): __A = f'First rotor position is not within range of 1..26 ({rotorposa}' raise ValueError(__UpperCamelCase ) if not 0 < rotorposa <= len(__UpperCamelCase ): __A = f'Second rotor position is not within range of 1..26 ({rotorposa})' raise ValueError(__UpperCamelCase ) if not 0 < rotorposa <= len(__UpperCamelCase ): __A = f'Third rotor position is not within range of 1..26 ({rotorposa})' raise ValueError(__UpperCamelCase ) # Validates string and returns dict __A = _plugboard(__UpperCamelCase ) return rotpos, rotsel, pbdict def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not isinstance(__UpperCamelCase , __UpperCamelCase ): __A = f'Plugboard setting isn\'t type string ({type(__UpperCamelCase )})' raise TypeError(__UpperCamelCase ) elif len(__UpperCamelCase ) % 2 != 0: __A = f'Odd number of symbols ({len(__UpperCamelCase )})' raise Exception(__UpperCamelCase ) elif pbstring == "": return {} pbstring.replace(''' ''' , '''''' ) # Checks if all characters are unique __A = set() for i in pbstring: if i not in abc: __A = f'\'{i}\' not in list of symbols' raise Exception(__UpperCamelCase ) elif i in tmppbl: __A = f'Duplicate symbol ({i})' raise Exception(__UpperCamelCase ) else: tmppbl.add(__UpperCamelCase ) del tmppbl # Created the dictionary __A = {} for j in range(0 , len(__UpperCamelCase ) - 1 , 2 ): __A = pbstring[j + 1] __A = pbstring[j] return pb def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = (rotora, rotora, rotora) , __UpperCamelCase = "" , ): """simple docstring""" __A = text.upper() __A , __A , __A = _validator( __UpperCamelCase , __UpperCamelCase , plugb.upper() ) __A , __A , __A = rotor_position __A , __A , __A = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 __A = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: __A = plugboard[symbol] # rotor ra -------------------------- __A = abc.index(__UpperCamelCase ) + rotorposa __A = rotora[index % len(__UpperCamelCase 
)] # rotor rb -------------------------- __A = abc.index(__UpperCamelCase ) + rotorposa __A = rotora[index % len(__UpperCamelCase )] # rotor rc -------------------------- __A = abc.index(__UpperCamelCase ) + rotorposa __A = rotora[index % len(__UpperCamelCase )] # reflector -------------------------- # this is the reason you don't need another machine to decipher __A = reflector[symbol] # 2nd rotors __A = abc[rotora.index(__UpperCamelCase ) - rotorposa] __A = abc[rotora.index(__UpperCamelCase ) - rotorposa] __A = abc[rotora.index(__UpperCamelCase ) - rotorposa] # 2nd plugboard if symbol in plugboard: __A = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(__UpperCamelCase ): __A = 0 rotorposa += 1 if rotorposa >= len(__UpperCamelCase ): __A = 0 rotorposa += 1 if rotorposa >= len(__UpperCamelCase ): __A = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(__UpperCamelCase ) return "".join(__UpperCamelCase ) if __name__ == "__main__": lowercase_ = 'This is my Python script that emulates the Enigma machine from WWII.' lowercase_ = (1, 1, 1) lowercase_ = 'pictures' lowercase_ = (rotora, rotora, rotora) lowercase_ = enigma(message, rotor_pos, rotor_sel, pb) print('Encrypted message:', en) print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
266
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=13, _lowerCamelCase : Any=3, _lowerCamelCase : Optional[int]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Any=[0.5, 0.5, 0.5], _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = do_normalize __A = image_mean __A = image_std def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = ViTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = EfficientFormerImageProcessorTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''image_std''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_proc_tester, 
equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), )
266
1
"""simple docstring""" import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments lowercase_ = logging.getLogger(__name__) @dataclass class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Optional[float] = field( default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) A_ : bool = field(default=_lowerCAmelCase , metadata={"help": "Whether to SortishSamler or not."} ) A_ : bool = field( default=_lowerCAmelCase , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) A_ : bool = field(default=_lowerCAmelCase , metadata={"help": "whether to use adafactor"} ) A_ : Optional[float] = field( default=_lowerCAmelCase , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) A_ : Optional[float] = field( default=_lowerCAmelCase , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) A_ : Optional[float] = field(default=_lowerCAmelCase , metadata={"help": "Dropout probability. Goes into model.config."} ) A_ : Optional[float] = field( default=_lowerCAmelCase , metadata={"help": "Attention dropout probability. Goes into model.config."} ) A_ : Optional[str] = field( default="linear" , metadata={"help": f'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
266
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int], *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : Dict ): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''', _lowerCamelCase, ) super().__init__(*_lowerCamelCase, **_lowerCamelCase )
266
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { 'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json', } class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : List[Any] = "switch_transformers" A_ : Optional[int] = ["past_key_values"] A_ : Optional[Any] = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Tuple, _lowerCamelCase : Optional[Any]=3_21_28, _lowerCamelCase : str=7_68, _lowerCamelCase : Optional[int]=64, _lowerCamelCase : Optional[Any]=20_48, _lowerCamelCase : str=64, _lowerCamelCase : Dict=12, _lowerCamelCase : Tuple=3, _lowerCamelCase : Optional[Any]=12, _lowerCamelCase : int=3, _lowerCamelCase : Union[str, Any]=12, _lowerCamelCase : Tuple=8, _lowerCamelCase : int=False, _lowerCamelCase : List[Any]=0.01, _lowerCamelCase : Optional[int]="float32", _lowerCamelCase : Dict=False, _lowerCamelCase : Dict=32, _lowerCamelCase : Any=1_28, _lowerCamelCase : List[Any]=0.1, _lowerCamelCase : Tuple=1e-6, _lowerCamelCase : Tuple=0.0_01, _lowerCamelCase : Union[str, Any]=0.0_01, _lowerCamelCase : Dict=1.0, _lowerCamelCase : Dict="relu", _lowerCamelCase : Any=True, _lowerCamelCase : Any=False, _lowerCamelCase : Tuple=True, _lowerCamelCase : int=0, _lowerCamelCase : Any=1, **_lowerCamelCase : Any, ): '''simple docstring''' __A = vocab_size __A = d_model __A = d_kv __A = d_ff __A = num_sparse_encoder_layers __A = num_layers __A = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __A = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: __A = self.num_layers // self.num_sparse_encoder_layers else: __A = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: __A = self.num_decoder_layers // self.num_sparse_decoder_layers else: __A = self.num_decoder_layers # HACK: this will create 0 sparse layers __A = num_heads __A = num_experts __A = expert_capacity __A = router_bias __A = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' ) __A = router_dtype __A = router_ignore_padding_tokens __A = relative_attention_num_buckets __A = relative_attention_max_distance __A = dropout_rate __A = layer_norm_epsilon __A = initializer_factor __A = feed_forward_proj __A = use_cache __A = add_router_probs __A = router_z_loss_coef __A = router_aux_loss_coef __A = self.feed_forward_proj.split('''-''' ) __A = act_info[-1] __A = act_info[0] == '''gated''' if len(_lowerCamelCase ) > 1 and act_info[0] != "gated" or len(_lowerCamelCase ) > 2: raise ValueError( f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __A = '''gelu_new''' super().__init__( pad_token_id=_lowerCamelCase, eos_token_id=_lowerCamelCase, is_encoder_decoder=_lowerCamelCase, **_lowerCamelCase, )
266
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : int=3, _lowerCamelCase : Optional[Any]=18, _lowerCamelCase : Any=30, _lowerCamelCase : str=4_00, _lowerCamelCase : int=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=True, ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = apply_ocr def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = LayoutLMvaImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''apply_ocr''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} ) __A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) self.assertIsInstance(encoding.words, _lowerCamelCase ) self.assertIsInstance(encoding.boxes, _lowerCamelCase ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def 
_SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' # with apply_OCR = True __A = LayoutLMvaImageProcessor() from datasets import load_dataset __A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' ) __A = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ), len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', 
'''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 
4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, _lowerCamelCase ) self.assertListEqual(encoding.boxes, _lowerCamelCase ) # with apply_OCR = False __A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
266
1
"""simple docstring""" import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Union[str, Any] = BertJapaneseTokenizer A_ : Union[str, Any] = False A_ : Optional[int] = True def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().setUp() __A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] __A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : Dict ): '''simple docstring''' __A = '''こんにちは、世界。 \nこんばんは、世界。''' __A = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Any ): '''simple docstring''' __A , __A = self.get_input_output_texts(_lowerCamelCase ) __A = tokenizer.encode(_lowerCamelCase, add_special_tokens=_lowerCamelCase ) __A = tokenizer.decode(_lowerCamelCase, clean_up_tokenization_spaces=_lowerCamelCase ) return text, ids def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = self.tokenizer_class(self.vocab_file ) __A = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) self.assertListEqual(_lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''mecab''' ) self.assertIsNotNone(_lowerCamelCase ) __A = '''こんにちは、世界。\nこんばんは、世界。''' __A = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] ) __A = os.path.join(self.tmpdirname, '''tokenizer.bin''' ) with open(_lowerCamelCase, '''wb''' ) as handle: pickle.dump(_lowerCamelCase, _lowerCamelCase ) with open(_lowerCamelCase, '''rb''' ) as handle: __A = pickle.load(_lowerCamelCase ) __A = tokenizer_new.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', 
'''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' try: __A = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' try: __A = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = MecabTokenizer(do_lower_case=_lowerCamelCase, mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' try: __A = MecabTokenizer( do_lower_case=_lowerCamelCase, normalize_text=_lowerCamelCase, mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = MecabTokenizer(normalize_text=_lowerCamelCase, mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''], ) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(_lowerCamelCase ) __A = '''こんにちは、世界。\nこんばんは、世界。''' __A = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] ) __A = os.path.join(self.tmpdirname, '''tokenizer.bin''' ) with open(_lowerCamelCase, '''wb''' ) as handle: pickle.dump(_lowerCamelCase, _lowerCamelCase ) with open(_lowerCamelCase, '''rb''' ) as handle: __A = pickle.load(_lowerCamelCase ) __A = tokenizer_new.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''], ) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def 
_SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国人''', '''参政権'''] ) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国人参政権'''] ) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = SudachiTokenizer(do_lower_case=_lowerCamelCase, sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''], ) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = SudachiTokenizer(normalize_text=_lowerCamelCase, sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''], ) @require_sudachi def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = SudachiTokenizer(trim_whitespace=_lowerCamelCase, sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], ) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(_lowerCamelCase ) __A = '''こんにちは、世界。\nこんばんは、世界。''' __A = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] ) __A = os.path.join(self.tmpdirname, '''tokenizer.bin''' ) with open(_lowerCamelCase, '''wb''' ) as handle: pickle.dump(_lowerCamelCase, _lowerCamelCase ) with open(_lowerCamelCase, '''rb''' ) as handle: __A = pickle.load(_lowerCamelCase ) __A = tokenizer_new.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], ) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = JumanppTokenizer(do_lower_case=_lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], ) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = JumanppTokenizer(normalize_text=_lowerCamelCase ) 
self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], ) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = JumanppTokenizer(trim_whitespace=_lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''], ) @require_jumanpp def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ), ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''], ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] __A = {} for i, token in enumerate(_lowerCamelCase ): __A = i __A = WordpieceTokenizer(vocab=_lowerCamelCase, unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ), [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ), ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ), ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ), ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) __A = tokenizer.subword_tokenizer __A = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(_lowerCamelCase, ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] ) __A = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(_lowerCamelCase, ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) __A = tokenizer.encode('''ありがとう。''', add_special_tokens=_lowerCamelCase ) __A = tokenizer.encode('''どういたしまして。''', add_special_tokens=_lowerCamelCase ) __A = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) __A = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase, _lowerCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = BertJapaneseTokenizer A_ : Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().setUp() __A = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] __A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Dict, **_lowerCamelCase : Any ): '''simple docstring''' return 
BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type='''character''', **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Dict ): '''simple docstring''' __A = '''こんにちは、世界。 \nこんばんは、世界。''' __A = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' pass # TODO add if relevant def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.tokenizer_class(self.vocab_file, subword_tokenizer_type='''character''' ) __A = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( _lowerCamelCase, ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCamelCase ), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] __A = {} for i, token in enumerate(_lowerCamelCase ): __A = i __A = CharacterTokenizer(vocab=_lowerCamelCase, unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ), [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ), ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ), ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) __A = tokenizer.encode('''ありがとう。''', add_special_tokens=_lowerCamelCase ) __A = tokenizer.encode('''どういたしまして。''', add_special_tokens=_lowerCamelCase ) __A = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) __A = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase, _lowerCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = '''cl-tohoku/bert-base-japanese''' __A = AutoTokenizer.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase, _lowerCamelCase ) class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''', level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(_lowerCamelCase ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) __A = '''bert-base-cased''' with self.assertLogs('''transformers''', level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(_lowerCamelCase ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
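# --- Illustrative sketch (not part of the test suite above) --------------------
# The WordpieceTokenizer / CharacterTokenizer classes exercised by these tests can
# also be driven directly with a toy vocabulary; the entries mirror the ones the
# tests define, while the example sentences are assumptions for demonstration only.
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    CharacterTokenizer,
    WordpieceTokenizer,
)

toy_vocab = {tok: i for i, tok in enumerate(
    ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "##ばんは", "、", "。"]
)}
wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")
print(wordpiece.tokenize("こんばんは"))  # expected: ['こん', '##ばんは']

char_vocab = {tok: i for i, tok in enumerate(["[UNK]", "こ", "ん", "に", "ち", "は"])}
character = CharacterTokenizer(vocab=char_vocab, unk_token="[UNK]")
print(character.tokenize("こんにちは"))  # expected: ['こ', 'ん', 'に', 'ち', 'は']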
266
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class snake_case ( ctypes.Structure ): '''simple docstring''' A_ : List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = False ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25l''' ) sys.stdout.flush() def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = True ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25h''' ) sys.stdout.flush() @contextmanager def lowerCAmelCase ( ): """simple docstring""" try: hide_cursor() yield finally: show_cursor()
266
1
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets lowercase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' lowercase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' lowercase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ], ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str="auto", _lowerCamelCase : Union[str, Any]=-1, _lowerCamelCase : List[str]=0.9, _lowerCamelCase : int=5, _lowerCamelCase : Tuple=5_00, _lowerCamelCase : Union[str, Any]="gpt2-large", _lowerCamelCase : int=-1, _lowerCamelCase : Union[str, Any]=10_24, _lowerCamelCase : Union[str, Any]=25, _lowerCamelCase : str=5, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=25, ): '''simple docstring''' __A = compute_mauve( p_text=_lowerCamelCase, q_text=_lowerCamelCase, p_features=_lowerCamelCase, q_features=_lowerCamelCase, p_tokens=_lowerCamelCase, q_tokens=_lowerCamelCase, num_buckets=_lowerCamelCase, pca_max_data=_lowerCamelCase, kmeans_explained_var=_lowerCamelCase, kmeans_num_redo=_lowerCamelCase, kmeans_max_iter=_lowerCamelCase, featurize_model_name=_lowerCamelCase, device_id=_lowerCamelCase, max_text_length=_lowerCamelCase, divergence_curve_discretization_size=_lowerCamelCase, mauve_scaling_factor=_lowerCamelCase, verbose=_lowerCamelCase, seed=_lowerCamelCase, ) return out
266
"""simple docstring""" import argparse import struct import unittest class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : bytes ): '''simple docstring''' __A = data # Initialize hash values __A = [ 0X6a_09e_667, 0Xbb_67a_e85, 0X3c_6ef_372, 0Xa5_4ff_53a, 0X51_0e5_27f, 0X9b_056_88c, 0X1f_83d_9ab, 0X5b_e0c_d19, ] # Initialize round constants __A = [ 0X42_8a2_f98, 0X71_374_491, 0Xb5_c0f_bcf, 0Xe9_b5d_ba5, 0X39_56c_25b, 0X59_f11_1f1, 0X92_3f8_2a4, 0Xab_1c5_ed5, 0Xd8_07a_a98, 0X12_835_b01, 0X24_318_5be, 0X55_0c7_dc3, 0X72_be5_d74, 0X80_deb_1fe, 0X9b_dc0_6a7, 0Xc1_9bf_174, 0Xe4_9b6_9c1, 0Xef_be4_786, 0X0f_c19_dc6, 0X24_0ca_1cc, 0X2d_e92_c6f, 0X4a_748_4aa, 0X5c_b0a_9dc, 0X76_f98_8da, 0X98_3e5_152, 0Xa8_31c_66d, 0Xb0_032_7c8, 0Xbf_597_fc7, 0Xc6_e00_bf3, 0Xd5_a79_147, 0X06_ca6_351, 0X14_292_967, 0X27_b70_a85, 0X2e_1b2_138, 0X4d_2c6_dfc, 0X53_380_d13, 0X65_0a7_354, 0X76_6a0_abb, 0X81_c2c_92e, 0X92_722_c85, 0Xa2_bfe_8a1, 0Xa8_1a6_64b, 0Xc2_4b8_b70, 0Xc7_6c5_1a3, 0Xd1_92e_819, 0Xd6_990_624, 0Xf4_0e3_585, 0X10_6aa_070, 0X19_a4c_116, 0X1e_376_c08, 0X27_487_74c, 0X34_b0b_cb5, 0X39_1c0_cb3, 0X4e_d8a_a4a, 0X5b_9cc_a4f, 0X68_2e6_ff3, 0X74_8f8_2ee, 0X78_a56_36f, 0X84_c87_814, 0X8c_c70_208, 0X90_bef_ffa, 0Xa4_506_ceb, 0Xbe_f9a_3f7, 0Xc6_717_8f2, ] __A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : bytes ): '''simple docstring''' __A = b'''\x80''' + (b'''\x00''' * (63 - (len(_lowerCamelCase ) + 8) % 64)) __A = struct.pack('''>Q''', (len(_lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' # Convert into blocks of 64 bytes __A = [ self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data ), 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __A = list(struct.unpack('''>16L''', _lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __A , __A , __A , __A , __A , __A , __A , __A = self.hashes for index in range(0, 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __A = ( self.ror(words[index - 15], 7 ) ^ self.ror(words[index - 15], 18 ) ^ (words[index - 15] >> 3) ) __A = ( self.ror(words[index - 2], 17 ) ^ self.ror(words[index - 2], 19 ) ^ (words[index - 2] >> 10) ) __A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression __A = self.ror(_lowerCamelCase, 6 ) ^ self.ror(_lowerCamelCase, 11 ) ^ self.ror(_lowerCamelCase, 25 ) __A = (e & f) ^ ((~e & 0Xff_fff_fff) & g) __A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 __A = self.ror(_lowerCamelCase, 2 ) ^ self.ror(_lowerCamelCase, 13 ) ^ self.ror(_lowerCamelCase, 22 ) __A = (a & b) ^ (a & c) ^ (b & c) __A = (sa + maj) % 0X100_000_000 __A , __A , __A , __A , __A , __A , __A , __A = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) __A = [a, b, c, d, e, f, g, h] # Modify final values __A = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes ) ] __A = ''''''.join([hex(_lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' return 0Xff_fff_fff & (value << (32 - rotations)) | (value >> rotations) class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' import 
hashlib __A = bytes('''Test String''', '''utf-8''' ) self.assertEqual(SHAaaa(_lowerCamelCase ).hash, hashlib.shaaaa(_lowerCamelCase ).hexdigest() ) def lowerCAmelCase ( ): """simple docstring""" import doctest doctest.testmod() __A = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) __A = parser.parse_args() __A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: __A = f.read() else: __A = bytes(__UpperCamelCase , '''utf-8''' ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
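# --- Illustrative check (not part of the module above) -------------------------
# The pure-Python class above is meant to reproduce standard SHA-256, so hashlib
# can serve as a reference implementation; the sample input is an assumption.
import hashlib

sample = b"Test String"
reference_digest = hashlib.sha256(sample).hexdigest()
# The class above (named `SHAaaa` in this file) exposes the equivalent value via
# its `.hash` attribute, so the following comparison is expected to hold:
#     SHAaaa(sample).hash == reference_digest
print(reference_digest)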
266
1
"""simple docstring""" import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = StableDiffusionPipeline.from_pretrained(__UpperCamelCase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors __A = load_file(__UpperCamelCase ) __A = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: __A = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) __A = pipeline.text_encoder else: __A = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) __A = pipeline.unet # find the target layer __A = layer_infos.pop(0 ) while len(__UpperCamelCase ) > -1: try: __A = curr_layer.__getattr__(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: __A = layer_infos.pop(0 ) elif len(__UpperCamelCase ) == 0: break except Exception: if len(__UpperCamelCase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: __A = layer_infos.pop(0 ) __A = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(__UpperCamelCase ) else: pair_keys.append(__UpperCamelCase ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: __A = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) __A = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(__UpperCamelCase , __UpperCamelCase ).unsqueeze(2 ).unsqueeze(3 ) else: __A = state_dict[pair_keys[0]].to(torch.floataa ) __A = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(__UpperCamelCase , __UpperCamelCase ) # update visited list for item in pair_keys: visited.append(__UpperCamelCase ) return pipeline if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( '--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.' ) parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument( '--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors' ) parser.add_argument( '--lora_prefix_text_encoder', default='lora_te', type=str, help='The prefix of text encoder weight in safetensors', ) parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW') parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.' ) parser.add_argument('--device', type=str, help='Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)') lowercase_ = parser.parse_args() lowercase_ = args.base_model_path lowercase_ = args.checkpoint_path lowercase_ = args.dump_path lowercase_ = args.lora_prefix_unet lowercase_ = args.lora_prefix_text_encoder lowercase_ = args.alpha lowercase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) lowercase_ = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
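# --- Illustrative invocation sketch (script name, paths and ids are assumptions) --
# The converter above is meant to be run from the command line with the flags its
# argparse section defines, for example:
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged-pipeline \
#       --alpha 0.75 --device cpu
#
# The merged result can then be reloaded like any other diffusers pipeline:
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("./merged-pipeline")  # assumed path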
266
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets lowercase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' lowercase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' lowercase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ], ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str="auto", _lowerCamelCase : Union[str, Any]=-1, _lowerCamelCase : List[str]=0.9, _lowerCamelCase : int=5, _lowerCamelCase : Tuple=5_00, _lowerCamelCase : Union[str, Any]="gpt2-large", _lowerCamelCase : int=-1, _lowerCamelCase : Union[str, Any]=10_24, _lowerCamelCase : Union[str, Any]=25, _lowerCamelCase : str=5, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=25, ): '''simple docstring''' __A = compute_mauve( p_text=_lowerCamelCase, q_text=_lowerCamelCase, p_features=_lowerCamelCase, q_features=_lowerCamelCase, p_tokens=_lowerCamelCase, q_tokens=_lowerCamelCase, num_buckets=_lowerCamelCase, pca_max_data=_lowerCamelCase, kmeans_explained_var=_lowerCamelCase, kmeans_num_redo=_lowerCamelCase, kmeans_max_iter=_lowerCamelCase, featurize_model_name=_lowerCamelCase, device_id=_lowerCamelCase, max_text_length=_lowerCamelCase, divergence_curve_discretization_size=_lowerCamelCase, mauve_scaling_factor=_lowerCamelCase, verbose=_lowerCamelCase, seed=_lowerCamelCase, ) return out
266
1
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : int = "char" A_ : str = "bpe" A_ : List[str] = "wp" lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Any = ["image_processor", "char_tokenizer"] A_ : Union[str, Any] = "ViTImageProcessor" A_ : int = "MgpstrTokenizer" def __init__( self : List[str], _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=None, **_lowerCamelCase : Tuple ): '''simple docstring''' __A = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', _lowerCamelCase, ) __A = kwargs.pop('''feature_extractor''' ) __A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) __A = tokenizer __A = AutoTokenizer.from_pretrained('''gpt2''' ) __A = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(_lowerCamelCase, _lowerCamelCase ) def __call__( self : Optional[Any], _lowerCamelCase : Tuple=None, _lowerCamelCase : List[str]=None, _lowerCamelCase : Optional[Any]=None, **_lowerCamelCase : List[Any] ): '''simple docstring''' if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: __A = self.image_processor(_lowerCamelCase, return_tensors=_lowerCamelCase, **_lowerCamelCase ) if text is not None: __A = self.char_tokenizer(_lowerCamelCase, return_tensors=_lowerCamelCase, **_lowerCamelCase ) if text is None: return inputs elif images is None: return encodings else: __A = encodings['''input_ids'''] return inputs def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : Optional[Any] ): '''simple docstring''' __A , __A , __A = sequences __A = char_preds.size(0 ) __A , __A = self._decode_helper(_lowerCamelCase, '''char''' ) __A , __A = self._decode_helper(_lowerCamelCase, '''bpe''' ) __A , __A = self._decode_helper(_lowerCamelCase, '''wp''' ) __A = [] __A = [] for i in range(_lowerCamelCase ): __A = [char_scores[i], bpe_scores[i], wp_scores[i]] __A = [char_strs[i], bpe_strs[i], wp_strs[i]] __A = scores.index(max(_lowerCamelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __A = {} __A = final_strs __A = final_scores __A = char_strs __A = bpe_strs __A = wp_strs return out def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : Dict, _lowerCamelCase : Dict ): '''simple docstring''' if format == DecodeType.CHARACTER: __A = self.char_decode __A = 1 __A = '''[s]''' elif format == DecodeType.BPE: __A = self.bpe_decode __A = 2 __A = '''#''' elif format == DecodeType.WORDPIECE: __A = self.wp_decode __A = 1_02 __A = '''[SEP]''' else: raise ValueError(f'Format {format} is not supported.' 
) __A , __A = [], [] __A = pred_logits.size(0 ) __A = pred_logits.size(1 ) __A , __A = pred_logits.topk(1, dim=-1, largest=_lowerCamelCase, sorted=_lowerCamelCase ) __A = preds_index.view(-1, _lowerCamelCase )[:, 1:] __A = decoder(_lowerCamelCase ) __A , __A = torch.nn.functional.softmax(_lowerCamelCase, dim=2 ).max(dim=2 ) __A = preds_max_prob[:, 1:] for index in range(_lowerCamelCase ): __A = preds_str[index].find(_lowerCamelCase ) __A = preds_str[index][:pred_eos] __A = preds_index[index].cpu().tolist() __A = pred_index.index(_lowerCamelCase ) if eos_token in pred_index else -1 __A = preds_max_prob[index][: pred_eos_index + 1] __A = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(_lowerCamelCase ) conf_scores.append(_lowerCamelCase ) return dec_strs, conf_scores def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str] ): '''simple docstring''' __A = [seq.replace(''' ''', '''''' ) for seq in self.char_tokenizer.batch_decode(_lowerCamelCase )] return decode_strs def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : Dict ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : int ): '''simple docstring''' __A = [seq.replace(''' ''', '''''' ) for seq in self.wp_tokenizer.batch_decode(_lowerCamelCase )] return decode_strs
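# --- Illustrative sketch of the decode-fusion step above (toy values assumed) ---
# For each sample the processor keeps whichever of the character / BPE / word-piece
# decodes scored highest; the strings and confidence scores below are assumptions.
char_strs, bpe_strs, wp_strs = ["ticket"], ["tlcket"], ["ticke"]
char_scores, bpe_scores, wp_scores = [0.91], [0.75], [0.88]

final_texts, final_scores = [], []
for i in range(len(char_strs)):
    scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
    strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
    best = scores.index(max(scores))
    final_texts.append(strs[best])
    final_scores.append(scores[best])

print(final_texts, final_scores)  # ['ticket'] [0.91]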
266
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowercase_ = imread(R'digital_image_processing/image_data/lena_small.jpg') lowercase_ = cvtColor(img, COLOR_BGR2GRAY) def lowerCAmelCase ( ): """simple docstring""" __A = cn.convert_to_negative(__UpperCamelCase ) # assert negative_img array for at least one True assert negative_img.any() def lowerCAmelCase ( ): """simple docstring""" with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(__UpperCamelCase , 1_1_0 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def lowerCAmelCase ( ): """simple docstring""" __A = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCAmelCase ( ): """simple docstring""" __A = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() __A = canny.canny(__UpperCamelCase ) # assert canny array for at least one True assert canny_array.any() def lowerCAmelCase ( ): """simple docstring""" assert gg.gaussian_filter(__UpperCamelCase , 5 , sigma=0.9 ).all() def lowerCAmelCase ( ): """simple docstring""" __A = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __A = conv.img_convolve(__UpperCamelCase , __UpperCamelCase ).astype(__UpperCamelCase ) assert res.any() def lowerCAmelCase ( ): """simple docstring""" assert med.median_filter(__UpperCamelCase , 3 ).any() def lowerCAmelCase ( ): """simple docstring""" __A , __A = sob.sobel_filter(__UpperCamelCase ) assert grad.any() and theta.any() def lowerCAmelCase ( ): """simple docstring""" __A = sp.make_sepia(__UpperCamelCase , 2_0 ) assert sepia.all() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ): """simple docstring""" __A = bs.Burkes(imread(__UpperCamelCase , 1 ) , 1_2_0 ) burkes.process() assert burkes.output_img.any() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ): """simple docstring""" __A = rs.NearestNeighbour(imread(__UpperCamelCase , 1 ) , 4_0_0 , 2_0_0 ) nn.process() assert nn.output.any() def lowerCAmelCase ( ): """simple docstring""" __A = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. 
__A = imread(__UpperCamelCase , 0 ) # Test for get_neighbors_pixel function() return not None __A = 0 __A = 0 __A = image[x_coordinate][y_coordinate] __A = lbp.get_neighbors_pixel( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __A = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __A = lbp.local_binary_value(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert lbp_image.any()
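# --- Illustrative sketch of the local-binary-pattern idea tested above ----------
# A pixel's LBP value compares its 8 neighbours with the centre intensity and packs
# the results into one byte. This standalone NumPy version is an assumption made
# for illustration, not the project's own implementation.
import numpy as np

def lbp_value(image: np.ndarray, x: int, y: int) -> int:
    center = image[x, y]
    # neighbour offsets, walked clockwise starting at the top-left corner
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        inside = 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1]
        if inside and image[nx, ny] >= center:
            value |= 1 << bit
    return value

toy = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]], dtype=np.uint8)
print(lbp_value(toy, 1, 1))  # 120: only the neighbours >= 50 set their bits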
266
1
"""simple docstring""" import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = ProphetNetTokenizer A_ : Tuple = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' super().setUp() __A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : Optional[Any] ): '''simple docstring''' __A = '''UNwant\u00E9d,running''' __A = '''unwanted, running''' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = self.tokenizer_class(self.vocab_file ) __A = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_lowerCamelCase, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ), [9, 6, 7, 12, 10, 11] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ), ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = BasicTokenizer(do_lower_case=_lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ), ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ), ['''hello'''] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = BasicTokenizer(do_lower_case=_lowerCamelCase, strip_accents=_lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ), ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ), ['''h\u00E9llo'''] ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = BasicTokenizer(do_lower_case=_lowerCamelCase, strip_accents=_lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ), ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ), ['''hello'''] ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = BasicTokenizer(do_lower_case=_lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ), ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ), ['''hello'''] ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = BasicTokenizer(do_lower_case=_lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
''' ), ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = BasicTokenizer(do_lower_case=_lowerCamelCase, strip_accents=_lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ), ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = BasicTokenizer(do_lower_case=_lowerCamelCase, strip_accents=_lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ), ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = BasicTokenizer(do_lower_case=_lowerCamelCase, never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ), ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __A = {} for i, token in enumerate(_lowerCamelCase ): __A = i __A = WordpieceTokenizer(vocab=_lowerCamelCase, unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ), [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ), ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ), ['''[UNK]''', '''runn''', '''##ing'''] ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) __A = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] __A = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02] __A = tokenizer(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''pt''' ) self.assertIsInstance(_lowerCamelCase, _lowerCamelCase ) __A = list(batch.input_ids.numpy()[0] ) self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) self.assertEqual((2, 9), batch.input_ids.shape ) self.assertEqual((2, 9), batch.attention_mask.shape ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) __A = tokenizer.encode('''sequence builders''', add_special_tokens=_lowerCamelCase ) __A = 
tokenizer.encode('''multi-sequence build''', add_special_tokens=_lowerCamelCase ) __A = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) __A = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase, _lowerCamelCase ) assert encoded_sentence == text + [1_02] assert encoded_pair == text + [1_02] + text_a + [1_02]
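# --- Illustrative sketch of the special-token layout asserted above -------------
# ProphetNet appends a single [SEP] (id 102) after each sequence and uses no [CLS];
# the toy id lists below are assumptions.
SEP = 102

def with_special_tokens(ids_a, ids_b=None):
    if ids_b is None:
        return ids_a + [SEP]
    return ids_a + [SEP] + ids_b + [SEP]

print(with_special_tokens([1037, 2146]))          # [1037, 2146, 102]
print(with_special_tokens([1037], [7680, 3989]))  # [1037, 102, 7680, 3989, 102]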
266
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowercase_ = random.Random() if is_torch_available(): import torch def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None ): """simple docstring""" if rng is None: __A = global_rng __A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any, _lowerCamelCase : List[str], _lowerCamelCase : Any=7, _lowerCamelCase : Optional[int]=4_00, _lowerCamelCase : Optional[int]=20_00, _lowerCamelCase : Dict=1, _lowerCamelCase : Optional[Any]=0.0, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Dict=True, ): '''simple docstring''' __A = parent __A = batch_size __A = min_seq_length __A = max_seq_length __A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __A = feature_size __A = padding_value __A = sampling_rate __A = return_attention_mask __A = do_normalize def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[Any]=False, _lowerCamelCase : int=False ): '''simple docstring''' def _flatten(_lowerCamelCase : List[str] ): return list(itertools.chain(*_lowerCamelCase ) ) if equal_length: __A = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __A = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: __A = [np.asarray(_lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = ASTFeatureExtractor def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ASTFeatureExtractionTester(self ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' # Tests that all call wrap to encode_plus and batch_encode_plus __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __A = [floats_list((1, x) )[0] for x in range(8_00, 14_00, 2_00 )] __A = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __A = feat_extract(speech_inputs[0], return_tensors='''np''' ).input_values __A = feat_extract(np_speech_inputs[0], return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test batched __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test 
2-D numpy arrays are batched. __A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __A = np.asarray(_lowerCamelCase ) __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' import torch __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __A = np.random.rand(1_00 ).astype(np.floataa ) __A = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' from datasets import load_dataset __A = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech __A = ds.sort('''id''' ).select(range(_lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # fmt: off __A = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on __A = self._load_datasamples(1 ) __A = ASTFeatureExtractor() __A = feature_extractor(_lowerCamelCase, return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape, (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30], _lowerCamelCase, atol=1e-4 ) )
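# --- Illustrative usage sketch (the random waveform is an assumption) -----------
# The extractor pads/normalises raw audio into a (batch, 1024, 128) log-mel tensor,
# which is the shape the integration test above asserts.
import numpy as np
from transformers import ASTFeatureExtractor

waveform = np.random.rand(16000).astype(np.float32)  # ~1 s of fake 16 kHz audio
features = ASTFeatureExtractor()(waveform, sampling_rate=16000, return_tensors="np")
print(features.input_values.shape)  # expected: (1, 1024, 128)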
266
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ = { 'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'], 'processing_layoutlmv2': ['LayoutLMv2Processor'], 'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ['LayoutLMv2TokenizerFast'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ['LayoutLMv2FeatureExtractor'] lowercase_ = ['LayoutLMv2ImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv2ForQuestionAnswering', 'LayoutLMv2ForSequenceClassification', 'LayoutLMv2ForTokenClassification', 'LayoutLMv2Layer', 'LayoutLMv2Model', 'LayoutLMv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = current_set.copy() for row_index, row in enumerate(__UpperCamelCase ): __A = row[0] for column_index, column in enumerate(__UpperCamelCase ): if magnitude == 0: __A = column continue __A = column / magnitude # Subtract to cancel term __A = current_set[0] __A = [first_row] __A = current_set[1::] for row in current_set: __A = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__UpperCamelCase ) continue for column_index in range(len(__UpperCamelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__UpperCamelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: __A = final_set[0] __A = [] __A = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) __A = simplify(__UpperCamelCase ) for i in range(len(__UpperCamelCase ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , __UpperCamelCase ) __A = resultant return final_set def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if len(__UpperCamelCase ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) __A = len(__UpperCamelCase ) + 1 if any(len(__UpperCamelCase ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(__UpperCamelCase , (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(__UpperCamelCase ) == 1: return [equations[0][-1] / equations[0][0]] __A = equations.copy() if any(0 in row for row in data_set ): __A = data_set.copy() __A = [] for row_index, row in enumerate(__UpperCamelCase ): if 0 not in row: __A = data_set.pop(__UpperCamelCase ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0 , __UpperCamelCase ) __A = data_set.copy() __A = simplify(__UpperCamelCase ) __A = simplified[::-1] __A = [] for row in simplified: __A = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue __A = row.copy()[: len(__UpperCamelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__UpperCamelCase ) == 0: solutions.append(0 ) continue __A = temp_row[1::] __A = temp_row[::-1] for column_index, column in enumerate(__UpperCamelCase ): current_solution -= column * solutions[column_index] solutions.append(__UpperCamelCase ) __A = [] for item in solutions: final.append(float(round(__UpperCamelCase , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowercase_ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowercase_ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowercase_ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase ( ): """simple docstring""" __A = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __A = bs[:] __A = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCamelCase ) cs.append(2**8 + n ) n += 1 __A = [chr(__UpperCamelCase ) for n in cs] return dict(zip(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = set() __A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __A = char return pairs class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = VOCAB_FILES_NAMES A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : Dict="replace", _lowerCamelCase : Any="<s>", _lowerCamelCase : Optional[int]="</s>", _lowerCamelCase : Dict="</s>", _lowerCamelCase : List[Any]="<s>", _lowerCamelCase : List[str]="<unk>", _lowerCamelCase : str="<pad>", _lowerCamelCase : Any="<mask>", _lowerCamelCase : Any=False, **_lowerCamelCase : Tuple, ): '''simple docstring''' __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else bos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else eos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else sep_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else cls_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token super().__init__( errors=_lowerCamelCase, bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, add_prefix_space=_lowerCamelCase, **_lowerCamelCase, ) with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) __A = {v: k for k, v in self.encoder.items()} __A = errors # how to handle errors in decoding __A = bytes_to_unicode() __A = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCamelCase, encoding='''utf-8''' ) as merges_handle: __A = merges_handle.read().split('''\n''' )[1:-1] __A = [tuple(merge.split() ) for merge in bpe_merges] __A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) ) __A = {} __A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder, **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] __A = tuple(_lowerCamelCase ) __A = get_pairs(_lowerCamelCase ) if not pairs: return token while True: __A = min(_lowerCamelCase, key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __A , __A = bigram __A = [] __A = 0 while i < len(_lowerCamelCase ): try: __A = word.index(_lowerCamelCase, _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __A = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __A = tuple(_lowerCamelCase ) __A = new_word if len(_lowerCamelCase ) == 1: break else: __A = get_pairs(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = word return word def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Dict ): '''simple docstring''' __A = [] for token in re.findall(self.pat, _lowerCamelCase ): __A = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) ) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' return self.encoder.get(_lowerCamelCase, self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ): '''simple docstring''' return self.decoder.get(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = ''''''.join(_lowerCamelCase ) __A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors ) return 
text def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowerCamelCase, ensure_ascii=_lowerCamelCase ) + '''\n''' ) __A = 0 with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ''' Please check that the tokenizer is not corrupted!''' ) __A = token_index writer.write(''' '''.join(_lowerCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str]=False, **_lowerCamelCase : List[Any] ): '''simple docstring''' __A = kwargs.pop('''add_prefix_space''', self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()): __A = ''' ''' + text return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ): '''simple docstring''' __A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __A = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
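# A hedged usage sketch for the slow Blenderbot tokenizer above. It assumes the class is exposed
# as transformers.BlenderbotTokenizer and that the facebook/blenderbot-3B checkpoint referenced
# in the vocabulary maps is reachable; note that Blenderbot only appends an end-of-sequence
# token, so the decoded string ends with </s>.
from transformers import BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
encoded = tokenizer(" Hello, how are you?")  # Blenderbot expects a leading space on user turns
print(encoded.input_ids)
print(tokenizer.decode(encoded.input_ids))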
"""simple docstring""" from __future__ import annotations from typing import Any def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not postfix_notation: return 0 __A = {'''+''', '''-''', '''*''', '''/'''} __A = [] for token in postfix_notation: if token in operations: __A , __A = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(__UpperCamelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer lowercase_ = logging.get_logger(__name__) lowercase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowercase_ = { 'vocab_file': { 'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json' }, 'merges_file': { 'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt' }, } lowercase_ = {'allegro/herbert-base-cased': 514} lowercase_ = {} class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : str = VOCAB_FILES_NAMES A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A_ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION A_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Union[str, Any] = HerbertTokenizer def __init__( self : List[str], _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : List[str]=None, _lowerCamelCase : List[str]=None, _lowerCamelCase : int="<s>", _lowerCamelCase : int="<unk>", _lowerCamelCase : Tuple="<pad>", _lowerCamelCase : Union[str, Any]="<mask>", _lowerCamelCase : Any="</s>", **_lowerCamelCase : str, ): '''simple docstring''' super().__init__( _lowerCamelCase, _lowerCamelCase, tokenizer_file=_lowerCamelCase, cls_token=_lowerCamelCase, unk_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, sep_token=_lowerCamelCase, **_lowerCamelCase, ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.cls_token_id] __A = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' __A = self._tokenizer.model.save(_lowerCamelCase, name=_lowerCamelCase ) return tuple(_lowerCamelCase )
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple, _lowerCamelCase : List[str]=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : int=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : int=99, _lowerCamelCase : Optional[int]=32, _lowerCamelCase : Tuple=5, _lowerCamelCase : Tuple=4, _lowerCamelCase : str=37, _lowerCamelCase : Union[str, Any]="gelu", _lowerCamelCase : int=0.1, _lowerCamelCase : List[Any]=0.1, _lowerCamelCase : Dict=5_12, _lowerCamelCase : List[Any]=16, _lowerCamelCase : Any=2, _lowerCamelCase : Any=0.02, _lowerCamelCase : Dict=4, ): '''simple docstring''' __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_attention_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_choices def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __A = None if self.use_attention_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) __A = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self.prepare_config_and_inputs() __A , __A , __A , __A = config_and_inputs __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Dict = True A_ : Tuple = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = FlaxRoFormerModelTester(self ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: 
__A = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''', from_pt=_lowerCamelCase ) __A = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCamelCase ) @require_flax class snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __A = jnp.array([[0, 1, 2, 3, 4, 5]] ) __A = model(_lowerCamelCase )[0] __A = 5_00_00 __A = (1, 6, vocab_size) self.assertEqual(output.shape, _lowerCamelCase ) __A = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : int, *_lowerCamelCase : List[Any], **_lowerCamelCase : Optional[Any] ): '''simple docstring''' warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''', _lowerCamelCase, ) super().__init__(*_lowerCamelCase, **_lowerCamelCase )
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def lowerCAmelCase ( __UpperCamelCase = 1_0_0_0_0_0_0 , __UpperCamelCase = 1_0 ): """simple docstring""" __A = defaultdict(__UpperCamelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __A = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __A = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__UpperCamelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 1_0 ) if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig lowercase_ = [ 'openmmlab/upernet-convnext-tiny', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring lowercase_ = 'UperNetConfig' class snake_case ( nn.Module ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : int, _lowerCamelCase : int, _lowerCamelCase : Union[int, Tuple[int, int]], _lowerCamelCase : Union[int, Tuple[int, int], str] = 0, _lowerCamelCase : bool = False, _lowerCamelCase : Union[int, Tuple[int, int]] = 1, ): '''simple docstring''' super().__init__() __A = nn.Convad( in_channels=_lowerCamelCase, out_channels=_lowerCamelCase, kernel_size=_lowerCamelCase, padding=_lowerCamelCase, bias=_lowerCamelCase, dilation=_lowerCamelCase, ) __A = nn.BatchNormad(_lowerCamelCase ) __A = nn.ReLU() def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : torch.Tensor ): '''simple docstring''' __A = self.conv(_lowerCamelCase ) __A = self.batch_norm(_lowerCamelCase ) __A = self.activation(_lowerCamelCase ) return output class snake_case ( nn.Module ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : int, _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' super().__init__() __A = [ nn.AdaptiveAvgPoolad(_lowerCamelCase ), UperNetConvModule(_lowerCamelCase, _lowerCamelCase, kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : torch.Tensor ): '''simple docstring''' __A = input for layer in self.layers: __A = layer(_lowerCamelCase ) return hidden_state class snake_case ( nn.Module ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : Tuple[int, ...], _lowerCamelCase : int, _lowerCamelCase : int, _lowerCamelCase : bool ): '''simple docstring''' super().__init__() __A = pool_scales __A = align_corners __A = in_channels __A = channels __A = [] for i, pool_scale in enumerate(_lowerCamelCase ): __A = UperNetPyramidPoolingBlock(pool_scale=_lowerCamelCase, in_channels=_lowerCamelCase, channels=_lowerCamelCase ) self.blocks.append(_lowerCamelCase ) self.add_module(str(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : torch.Tensor ): '''simple docstring''' __A = [] for ppm in self.blocks: __A = ppm(_lowerCamelCase ) __A = nn.functional.interpolate( _lowerCamelCase, size=x.size()[2:], mode='''bilinear''', align_corners=self.align_corners ) ppm_outs.append(_lowerCamelCase ) return ppm_outs class snake_case ( nn.Module ): '''simple docstring''' def __init__( self : Any, _lowerCamelCase : List[str], _lowerCamelCase : Tuple ): '''simple docstring''' super().__init__() __A = config __A = config.pool_scales # e.g. 
(1, 2, 3, 6) __A = in_channels __A = config.hidden_size __A = False __A = nn.Convad(self.channels, config.num_labels, kernel_size=1 ) # PSP Module __A = UperNetPyramidPoolingModule( self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners, ) __A = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels, self.channels, kernel_size=3, padding=1, ) # FPN Module __A = nn.ModuleList() __A = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer __A = UperNetConvModule(_lowerCamelCase, self.channels, kernel_size=1 ) __A = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1 ) self.lateral_convs.append(_lowerCamelCase ) self.fpn_convs.append(_lowerCamelCase ) __A = UperNetConvModule( len(self.in_channels ) * self.channels, self.channels, kernel_size=3, padding=1, ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' self.apply(self._init_weights ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : str ): '''simple docstring''' if isinstance(_lowerCamelCase, nn.Convad ): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Union[str, Any] ): '''simple docstring''' __A = inputs[-1] __A = [x] psp_outs.extend(self.psp_modules(_lowerCamelCase ) ) __A = torch.cat(_lowerCamelCase, dim=1 ) __A = self.bottleneck(_lowerCamelCase ) return output def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : torch.Tensor ): '''simple docstring''' # build laterals __A = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(_lowerCamelCase ) ) # build top-down path __A = len(_lowerCamelCase ) for i in range(used_backbone_levels - 1, 0, -1 ): __A = laterals[i - 1].shape[2:] __A = laterals[i - 1] + nn.functional.interpolate( laterals[i], size=_lowerCamelCase, mode='''bilinear''', align_corners=self.align_corners ) # build outputs __A = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1, 0, -1 ): __A = nn.functional.interpolate( fpn_outs[i], size=fpn_outs[0].shape[2:], mode='''bilinear''', align_corners=self.align_corners ) __A = torch.cat(_lowerCamelCase, dim=1 ) __A = self.fpn_bottleneck(_lowerCamelCase ) __A = self.classifier(_lowerCamelCase ) return output class snake_case ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any], _lowerCamelCase : Tuple, _lowerCamelCase : int = 2, _lowerCamelCase : int = 3, _lowerCamelCase : Union[int, Tuple[int, int]] = 1 ): '''simple docstring''' super().__init__() __A = config __A = config.auxiliary_in_channels __A = config.auxiliary_channels __A = config.auxiliary_num_convs __A = config.auxiliary_concat_input __A = in_index __A = (kernel_size // 2) * dilation __A = [] convs.append( UperNetConvModule( self.in_channels, self.channels, kernel_size=_lowerCamelCase, padding=_lowerCamelCase, dilation=_lowerCamelCase ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels, self.channels, kernel_size=_lowerCamelCase, padding=_lowerCamelCase, dilation=_lowerCamelCase ) ) if self.num_convs == 0: __A = nn.Identity() else: __A = nn.Sequential(*_lowerCamelCase ) if self.concat_input: __A = UperNetConvModule( self.in_channels + self.channels, self.channels, 
kernel_size=_lowerCamelCase, padding=kernel_size // 2 ) __A = nn.Convad(self.channels, config.num_labels, kernel_size=1 ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' self.apply(self._init_weights ) def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : List[Any] ): '''simple docstring''' if isinstance(_lowerCamelCase, nn.Convad ): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : torch.Tensor ): '''simple docstring''' # just take the relevant feature maps __A = encoder_hidden_states[self.in_index] __A = self.convs(_lowerCamelCase ) if self.concat_input: __A = self.conv_cat(torch.cat([hidden_states, output], dim=1 ) ) __A = self.classifier(_lowerCamelCase ) return output class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : List[str] = UperNetConfig A_ : Optional[Any] = "pixel_values" A_ : str = True def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : int ): '''simple docstring''' if isinstance(_lowerCamelCase, _lowerCamelCase ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : Optional[int], _lowerCamelCase : Tuple=False ): '''simple docstring''' if isinstance(_lowerCamelCase, _lowerCamelCase ): __A = value lowercase_ = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowercase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." 
, _lowerCAmelCase , ) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : str ): '''simple docstring''' super().__init__(_lowerCamelCase ) __A = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) __A = UperNetHead(_lowerCamelCase, in_channels=self.backbone.channels ) __A = UperNetFCNHead(_lowerCamelCase ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) ) @replace_return_docstrings(output_type=_lowerCamelCase, config_class=_CONFIG_FOR_DOC ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : Optional[torch.Tensor] = None, _lowerCamelCase : Optional[bool] = None, _lowerCamelCase : Optional[bool] = None, _lowerCamelCase : Optional[torch.Tensor] = None, _lowerCamelCase : Optional[bool] = None, ): '''simple docstring''' __A = return_dict if return_dict is not None else self.config.use_return_dict __A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __A = output_attentions if output_attentions is not None else self.config.output_attentions __A = self.backbone.forward_with_filtered_kwargs( _lowerCamelCase, output_hidden_states=_lowerCamelCase, output_attentions=_lowerCamelCase ) __A = outputs.feature_maps __A = self.decode_head(_lowerCamelCase ) __A = nn.functional.interpolate(_lowerCamelCase, size=pixel_values.shape[2:], mode='''bilinear''', align_corners=_lowerCamelCase ) __A = None if self.auxiliary_head is not None: __A = self.auxiliary_head(_lowerCamelCase ) __A = nn.functional.interpolate( _lowerCamelCase, size=pixel_values.shape[2:], mode='''bilinear''', align_corners=_lowerCamelCase ) __A = None if labels is not None: if self.config.num_labels == 1: raise ValueError('''The number of labels should be greater than one''' ) else: # compute weighted loss __A = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) __A = loss_fct(_lowerCamelCase, _lowerCamelCase ) __A = loss_fct(_lowerCamelCase, _lowerCamelCase ) __A = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: __A = (logits,) + outputs[1:] else: __A = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=_lowerCamelCase, logits=_lowerCamelCase, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
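# A hedged end-to-end usage sketch for the segmentation model above, loaded through the public
# transformers classes from the openmmlab/upernet-convnext-tiny checkpoint named in the file
# header; the random pixel batch is illustrative, and the 150-class logits shape is specific to
# that ADE20k checkpoint.
import torch
from transformers import UperNetForSemanticSegmentation

model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
model.eval()
with torch.no_grad():
    outputs = model(pixel_values=torch.randn(1, 3, 512, 512))
print(outputs.logits.shape)  # torch.Size([1, 150, 512, 512]); logits are upsampled to the input size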
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : Optional[int]=2, _lowerCamelCase : Optional[int]=3, _lowerCamelCase : int=64, _lowerCamelCase : List[str]=None ): '''simple docstring''' __A = np.random.default_rng(_lowerCamelCase ) __A = length __A = rng.normal(size=(length,) ).astype(np.floataa ) __A = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa ) def __len__( self : str ): '''simple docstring''' return self.length def __getitem__( self : Dict, _lowerCamelCase : Optional[int] ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple=0, _lowerCamelCase : Any=0, _lowerCamelCase : Optional[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[Any]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a[0] + self.b[0] class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : str, _lowerCamelCase : Optional[Any]=0, _lowerCamelCase : Any=0, _lowerCamelCase : List[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a + self.b def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1_6 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __A = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __A = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} __A = load_dataset('''csv''' , data_files=__UpperCamelCase ) __A = datasets['''train'''].unique('''label''' ) __A = {v: i for i, v in enumerate(__UpperCamelCase )} def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) if "label" in examples: __A = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __A = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
__A = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=2 ) __A = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Any, _lowerCamelCase : TransformeraDModel, _lowerCamelCase : AutoencoderKL, _lowerCamelCase : KarrasDiffusionSchedulers, _lowerCamelCase : Optional[Dict[int, str]] = None, ): '''simple docstring''' super().__init__() self.register_modules(transformer=_lowerCamelCase, vae=_lowerCamelCase, scheduler=_lowerCamelCase ) # create a imagenet -> id dictionary for easier use __A = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(''',''' ): __A = int(_lowerCamelCase ) __A = dict(sorted(self.labels.items() ) ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : Union[str, List[str]] ): '''simple docstring''' if not isinstance(_lowerCamelCase, _lowerCamelCase ): __A = list(_lowerCamelCase ) for l in label: if l not in self.labels: raise ValueError( f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : float = 4.0, _lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None, _lowerCamelCase : int = 50, _lowerCamelCase : Optional[str] = "pil", _lowerCamelCase : bool = True, ): '''simple docstring''' __A = len(_lowerCamelCase ) __A = self.transformer.config.sample_size __A = self.transformer.config.in_channels __A = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size), generator=_lowerCamelCase, device=self.device, dtype=self.transformer.dtype, ) __A = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents __A = torch.tensor(_lowerCamelCase, device=self.device ).reshape(-1 ) __A = torch.tensor([10_00] * batch_size, device=self.device ) __A = torch.cat([class_labels, class_null], 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(_lowerCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: __A = latent_model_input[: len(_lowerCamelCase ) // 2] __A = torch.cat([half, half], dim=0 ) __A = self.scheduler.scale_model_input(_lowerCamelCase, _lowerCamelCase ) __A = t if not torch.is_tensor(_lowerCamelCase ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) __A = latent_model_input.device.type == '''mps''' if isinstance(_lowerCamelCase, _lowerCamelCase ): __A = torch.floataa if is_mps else torch.floataa else: __A = torch.intaa if is_mps else torch.intaa __A = torch.tensor([timesteps], dtype=_lowerCamelCase, device=latent_model_input.device ) elif len(timesteps.shape ) == 0: __A = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __A = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output __A = self.transformer( _lowerCamelCase, timestep=_lowerCamelCase, class_labels=_lowerCamelCase ).sample # perform guidance if guidance_scale > 1: __A , __A = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] __A , __A = torch.split(_lowerCamelCase, len(_lowerCamelCase ) // 2, dim=0 ) __A = uncond_eps + guidance_scale * (cond_eps - uncond_eps) __A = torch.cat([half_eps, half_eps], dim=0 ) __A = torch.cat([eps, rest], dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: __A , __A = torch.split(_lowerCamelCase, _lowerCamelCase, dim=1 ) else: __A = noise_pred # compute previous image: x_t -> x_t-1 __A = self.scheduler.step(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ).prev_sample if guidance_scale > 1: __A , __A = latent_model_input.chunk(2, dim=0 ) else: __A = latent_model_input __A = 1 / self.vae.config.scaling_factor * latents __A = self.vae.decode(_lowerCamelCase ).sample __A = (samples / 2 + 0.5).clamp(0, 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __A = samples.cpu().permute(0, 2, 3, 1 ).float().numpy() if output_type == "pil": __A = self.numpy_to_pil(_lowerCamelCase ) if not return_dict: return (samples,) return ImagePipelineOutput(images=_lowerCamelCase )
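# A hedged usage sketch for the class-conditional DiT pipeline above, using the public diffusers
# entry point and the facebook/DiT-XL-2-256 checkpoint; a CUDA device is assumed (drop the
# .to("cuda") call and the fp16 dtype to run on CPU), and the label text is illustrative only.
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

class_ids = pipe.get_label_ids(["golden retriever"])  # label text -> ImageNet class id
image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
image.save("dit_sample.png")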
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ), } ), ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[List[List[str]]], _lowerCamelCase : List[List[str]], _lowerCamelCase : int = 1, _lowerCamelCase : int = 4, ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCamelCase, hypotheses=_lowerCamelCase, min_len=_lowerCamelCase, max_len=_lowerCamelCase ) }
"""simple docstring""" import pytest lowercase_ = '__dummy_dataset1__' lowercase_ = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def lowerCAmelCase ( ): """simple docstring""" return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def lowerCAmelCase ( ): """simple docstring""" return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = dataset_loading_script_name __A = tmp_path / '''datasets''' / script_name script_dir.mkdir(parents=__UpperCamelCase ) __A = script_dir / f'{script_name}.py' with open(__UpperCamelCase , '''w''' ) as f: f.write(__UpperCamelCase ) return str(__UpperCamelCase )
"""simple docstring""" class snake_case : '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : list[int] ): '''simple docstring''' __A = len(_lowerCamelCase ) __A = [0] * len_array if len_array > 0: __A = array[0] for i in range(1, _lowerCamelCase ): __A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int ): '''simple docstring''' __A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(_lowerCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = 0 def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __A = Path(_lowerCamelCase ) / '''preprocessor_config.json''' __A = Path(_lowerCamelCase ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(_lowerCamelCase, '''w''' ), ) json.dump({'''model_type''': '''clip'''}, open(_lowerCamelCase, '''w''' ) ) __A = AutoImageProcessor.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: __A = Path(_lowerCamelCase ) / '''preprocessor_config.json''' __A = Path(_lowerCamelCase ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''}, open(_lowerCamelCase, '''w''' ), ) json.dump({'''model_type''': '''clip'''}, open(_lowerCamelCase, '''w''' ) ) __A = AutoImageProcessor.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __A = CLIPConfig() # Create a dummy config file with image_proceesor_type __A = Path(_lowerCamelCase ) / '''preprocessor_config.json''' __A = Path(_lowerCamelCase ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(_lowerCamelCase, '''w''' ), ) json.dump({'''model_type''': '''clip'''}, open(_lowerCamelCase, '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally __A = AutoImageProcessor.from_pretrained(_lowerCamelCase ).to_dict() config_dict.pop('''image_processor_type''' ) __A = CLIPImageProcessor(**_lowerCamelCase ) # save in new folder model_config.save_pretrained(_lowerCamelCase ) config.save_pretrained(_lowerCamelCase ) __A = AutoImageProcessor.from_pretrained(_lowerCamelCase ) # make sure private variable is not incorrectly saved __A = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __A = Path(_lowerCamelCase ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': 
'''CLIPProcessor'''}, open(_lowerCamelCase, '''w''' ), ) __A = AutoImageProcessor.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' with self.assertRaisesRegex( _lowerCamelCase, '''clip-base is not a local folder and is not a valid model identifier''' ): __A = AutoImageProcessor.from_pretrained('''clip-base''' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _lowerCamelCase, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __A = AutoImageProcessor.from_pretrained(_lowerCamelCase, revision='''aaaaaa''' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' with self.assertRaisesRegex( _lowerCamelCase, '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''', ): __A = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_lowerCamelCase ): __A = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_lowerCamelCase ): __A = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=_lowerCamelCase ) __A = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=_lowerCamelCase ) self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_lowerCamelCase ) __A = AutoImageProcessor.from_pretrained(_lowerCamelCase, trust_remote_code=_lowerCamelCase ) self.assertEqual(reloaded_image_processor.__class__.__name__, '''NewImageProcessor''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' try: AutoConfig.register('''custom''', _lowerCamelCase ) AutoImageProcessor.register(_lowerCamelCase, _lowerCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_lowerCamelCase ): AutoImageProcessor.register(_lowerCamelCase, _lowerCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: __A = Path(_lowerCamelCase ) / '''preprocessor_config.json''' __A = Path(_lowerCamelCase ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''}, open(_lowerCamelCase, '''w''' ), ) json.dump({'''model_type''': '''clip'''}, open(_lowerCamelCase, '''w''' ) ) __A = CustomImageProcessor.from_pretrained(_lowerCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_lowerCamelCase ) __A = AutoImageProcessor.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase, _lowerCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Optional[int] = True try: AutoConfig.register('''custom''', _lowerCamelCase ) AutoImageProcessor.register(_lowerCamelCase, _lowerCamelCase ) # If remote code is not set, the default is to use local __A = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. __A = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=_lowerCamelCase ) self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub __A = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=_lowerCamelCase ) self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' ) self.assertTrue(not hasattr(_lowerCamelCase, '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
266
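The tests above exercise the AutoImageProcessor routing logic: a folder holding a preprocessor_config.json (or a legacy feature-extractor config) is enough for the Auto API to pick the right processor class. A minimal sketch of that round trip, assuming transformers is installed, might look like:

import json
import tempfile
from pathlib import Path

from transformers import AutoImageProcessor, CLIPImageProcessor

with tempfile.TemporaryDirectory() as tmpdir:
    # A preprocessor_config.json naming the processor class is all the Auto API needs.
    config_path = Path(tmpdir) / "preprocessor_config.json"
    with open(config_path, "w") as f:
        json.dump(
            {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
            f,
        )
    processor = AutoImageProcessor.from_pretrained(tmpdir)
    assert isinstance(processor, CLIPImageProcessor)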
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowercase_ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowercase_ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase ( ): """simple docstring""" __A = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __A = bs[:] __A = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCamelCase ) cs.append(2**8 + n ) n += 1 __A = [chr(__UpperCamelCase ) for n in cs] return dict(zip(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = set() __A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __A = char return pairs class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = VOCAB_FILES_NAMES A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : Dict="replace", _lowerCamelCase : Any="<s>", _lowerCamelCase : Optional[int]="</s>", _lowerCamelCase : Dict="</s>", _lowerCamelCase : List[Any]="<s>", _lowerCamelCase : List[str]="<unk>", _lowerCamelCase : str="<pad>", _lowerCamelCase : Any="<mask>", _lowerCamelCase : Any=False, **_lowerCamelCase : Tuple, ): '''simple docstring''' __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else bos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else eos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else sep_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else cls_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token super().__init__( errors=_lowerCamelCase, bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, add_prefix_space=_lowerCamelCase, **_lowerCamelCase, ) with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) __A = {v: k for k, v in self.encoder.items()} __A = errors # how to handle errors in decoding __A = bytes_to_unicode() __A = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCamelCase, encoding='''utf-8''' ) as merges_handle: __A = merges_handle.read().split('''\n''' )[1:-1] __A = [tuple(merge.split() ) for merge in bpe_merges] __A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) ) __A = {} __A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder, **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] __A = tuple(_lowerCamelCase ) __A = get_pairs(_lowerCamelCase ) if not pairs: return token while True: __A = min(_lowerCamelCase, key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __A , __A = bigram __A = [] __A = 0 while i < len(_lowerCamelCase ): try: __A = word.index(_lowerCamelCase, _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __A = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __A = tuple(_lowerCamelCase ) __A = new_word if len(_lowerCamelCase ) == 1: break else: __A = get_pairs(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = word return word def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Dict ): '''simple docstring''' __A = [] for token in re.findall(self.pat, _lowerCamelCase ): __A = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) ) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' return self.encoder.get(_lowerCamelCase, self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ): '''simple docstring''' return self.decoder.get(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = ''''''.join(_lowerCamelCase ) __A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors ) return 
text def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowerCamelCase, ensure_ascii=_lowerCamelCase ) + '''\n''' ) __A = 0 with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ''' Please check that the tokenizer is not corrupted!''' ) __A = token_index writer.write(''' '''.join(_lowerCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str]=False, **_lowerCamelCase : List[Any] ): '''simple docstring''' __A = kwargs.pop('''add_prefix_space''', self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()): __A = ''' ''' + text return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ): '''simple docstring''' __A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __A = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
266
1
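The tokenizer above is a byte-level BPE tokenizer: text is mapped to unicode-safe byte symbols and adjacent symbol pairs are merged greedily by merge rank. A stripped-down sketch of just the merge loop (toy merge table and simplified merging, not the Blenderbot vocabulary) might look like:

def get_pairs(word: tuple) -> set:
    # All adjacent symbol pairs in a word represented as a tuple of symbols.
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}


def bpe(token: str, bpe_ranks: dict) -> str:
    word = tuple(token)
    pairs = get_pairs(word)
    while pairs:
        # Merge the adjacent pair with the best (lowest) merge rank first.
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        pairs = get_pairs(word)
    return " ".join(word)


print(bpe("hello", {("l", "l"): 0, ("h", "e"): 1}))  # -> "he ll o"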
"""simple docstring""" import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer lowercase_ = 'bart' lowercase_ = True @st.cache(allow_output_mutation=__UpperCamelCase ) def lowerCAmelCase ( ): """simple docstring""" if LOAD_DENSE_INDEX: __A = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __A = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __A = qar_model.eval() else: __A , __A = (None, None) if MODEL_TYPE == "bart": __A = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __A = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __A = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __A = sas_model.eval() else: __A , __A = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__UpperCamelCase ) def lowerCAmelCase ( ): """simple docstring""" if LOAD_DENSE_INDEX: __A = faiss.StandardGpuResources() __A = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __A = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 1_2_8) , ) __A = faiss.IndexFlatIP(1_2_8 ) __A = faiss.index_cpu_to_gpu(__UpperCamelCase , 1 , __UpperCamelCase ) wikiaab_gpu_index_flat.add(__UpperCamelCase ) # TODO fix for larger GPU else: __A , __A = (None, None) __A = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__UpperCamelCase ) def lowerCAmelCase ( ): """simple docstring""" __A = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __A = elia['''train_eli5'''] __A = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 1_2_8) ) __A = faiss.IndexFlatIP(1_2_8 ) eli5_train_q_index.add(__UpperCamelCase ) return (elia_train, eli5_train_q_index) lowercase_ , lowercase_ , lowercase_ = load_indexes() lowercase_ , lowercase_ , lowercase_ , lowercase_ = load_models() lowercase_ , lowercase_ = load_train_data() def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=1_0 ): """simple docstring""" __A = embed_questions_for_retrieval([question] , __UpperCamelCase , __UpperCamelCase ) __A , __A = eli5_train_q_index.search(__UpperCamelCase , __UpperCamelCase ) __A = [elia_train[int(__UpperCamelCase )] for i in I[0]] return nn_examples def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase="wiki40b" , __UpperCamelCase="dense" , __UpperCamelCase=1_0 ): """simple docstring""" if source == "none": __A , __A = (''' <P> '''.join(['''''' for _ in range(1_1 )] ).strip(), []) else: if method == "dense": __A , __A = query_qa_dense_index( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: __A , __A = query_es_index( __UpperCamelCase , __UpperCamelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__UpperCamelCase , ) __A = [ (res['''article_title'''], 
res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __A = '''question: {} context: {}'''.format(__UpperCamelCase , __UpperCamelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __UpperCamelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __UpperCamelCase : None), } ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=6_4 , __UpperCamelCase=2_5_6 , __UpperCamelCase=False , __UpperCamelCase=2 , __UpperCamelCase=0.95 , __UpperCamelCase=0.8 ): """simple docstring""" with torch.no_grad(): __A = qa_sas_generate( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , num_answers=1 , num_beams=__UpperCamelCase , min_len=__UpperCamelCase , max_len=__UpperCamelCase , do_sample=__UpperCamelCase , temp=__UpperCamelCase , top_p=__UpperCamelCase , top_k=__UpperCamelCase , max_input_length=1_0_2_4 , device='''cuda:0''' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar lowercase_ = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' lowercase_ = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia lowercase_ = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) lowercase_ = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] lowercase_ = st.sidebar.checkbox('Demo options') if demo_options: lowercase_ = st.sidebar.selectbox( '', action_list, index=3, ) lowercase_ = action_list.index(action_st) lowercase_ = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) lowercase_ = show_type == 'Show full text of passages' else: lowercase_ = 3 lowercase_ = True lowercase_ = st.sidebar.checkbox('Retrieval options') if retrieval_options: lowercase_ = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) lowercase_ = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) lowercase_ = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: lowercase_ = 'wiki40b' lowercase_ = 'dense' lowercase_ = 'beam' lowercase_ = 2 lowercase_ = 64 lowercase_ = 256 lowercase_ = None lowercase_ = None lowercase_ = st.sidebar.checkbox('Generation options') if generate_options: lowercase_ = '\n ### Answer generation 
options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) lowercase_ = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) lowercase_ = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None ) lowercase_ = st.sidebar.slider( 'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": lowercase_ = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: lowercase_ = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) lowercase_ = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) lowercase_ = None # start main text lowercase_ = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] lowercase_ = st.selectbox( 'What would you like to ask? 
---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": lowercase_ = st.text_input('Enter your question here:', '') else: lowercase_ = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": lowercase_ , lowercase_ = make_support(question, source=wiki_source, method='dense', n_results=10) lowercase_ , lowercase_ = make_support(question, source=wiki_source, method='sparse', n_results=10) lowercase_ = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] lowercase_ = support_list[:10] lowercase_ = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: lowercase_ , lowercase_ = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: lowercase_ , lowercase_ = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): lowercase_ = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) lowercase_ = res[1].strip() if sec_titles == "": lowercase_ = '[{}]({})'.format(res[0], wiki_url) else: lowercase_ = sec_titles.split(' & ') lowercase_ = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: lowercase_ = find_nearest_training(question) lowercase_ = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) lowercase_ = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) lowercase_ = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
266
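The demo above retrieves support passages by max-inner-product search over dense passage embeddings before handing the question and retrieved context to a seq2seq generator. A minimal sketch of that retrieval step with FAISS, using random stand-in vectors instead of the Wiki40b embeddings, might look like:

import faiss
import numpy as np

dim = 128
passage_reps = np.random.rand(1000, dim).astype("float32")  # stand-in passage embeddings

index = faiss.IndexFlatIP(dim)   # exact maximum-inner-product search
index.add(passage_reps)

question_rep = np.random.rand(1, dim).astype("float32")      # stand-in question embedding
scores, ids = index.search(question_rep, 10)                 # top-10 passage indices and scores
print(ids[0], scores[0])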
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging lowercase_ = ( 'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py' ) lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCAmelCase ( ): """simple docstring""" __A = '''https://pypi.org/pypi/diffusers/json''' __A = json.loads(request.urlopen(__UpperCamelCase ).read() )['''releases'''].keys() return sorted(__UpperCamelCase , key=lambda __UpperCamelCase : version.Version(__UpperCamelCase ) ) def lowerCAmelCase ( ): """simple docstring""" if HF_MODULES_CACHE in sys.path: return sys.path.append(__UpperCamelCase ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = Path(__UpperCamelCase ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" init_hf_modules() __A = Path(__UpperCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import .xxx` __A = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = False __A = [module_file] __A = [] # Let's recurse through all relative imports while not no_change: __A = [] for f in files_to_check: new_imports.extend(get_relative_imports(__UpperCamelCase ) ) __A = Path(__UpperCamelCase ).parent __A = [str(module_path / m ) for m in new_imports] __A = [f for f in new_import_files if f not in all_relative_imports] __A = [f'{f}.py' for f in new_import_files] __A = len(__UpperCamelCase ) == 0 all_relative_imports.extend(__UpperCamelCase ) return all_relative_imports def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import xxx` __A = re.findall('''^\s*import\s+(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Only keep the top-level module __A = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all __A = list(set(__UpperCamelCase ) ) __A = [] for imp in imports: try: importlib.import_module(__UpperCamelCase ) except ImportError: missing_packages.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' f'{", 
".join(__UpperCamelCase )}. Run `pip install {" ".join(__UpperCamelCase )}`' ) return get_relative_imports(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = module_path.replace(os.path.sep , '''.''' ) __A = importlib.import_module(__UpperCamelCase ) if class_name is None: return find_pipeline_class(__UpperCamelCase ) return getattr(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" from ..pipelines import DiffusionPipeline __A = dict(inspect.getmembers(__UpperCamelCase , inspect.isclass ) ) __A = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , __UpperCamelCase ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' f' {loaded_module}.' ) __A = cls return pipeline_class def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ): """simple docstring""" __A = str(__UpperCamelCase ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): __A = module_file_or_url __A = '''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: __A = get_diffusers_versions() # cut ".dev0" __A = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: __A = latest_version if latest_version[1:] in available_versions else '''main''' logger.info(f'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: __A = f'v{revision}' elif revision == "main": __A = revision else: raise ValueError( f'`custom_revision`: {revision} does not exist. Please make sure to choose one of' f' {", ".join(available_versions + ["main"] )}.' ) # community pipeline on GitHub __A = COMMUNITY_PIPELINES_URL.format(revision=__UpperCamelCase , pipeline=__UpperCamelCase ) try: __A = cached_download( __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = '''git''' __A = pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise else: try: # Load from URL or cache if already cached __A = hf_hub_download( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment __A = check_imports(__UpperCamelCase ) # Now we move the module inside our cached dynamic modules. 
__A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(__UpperCamelCase ) __A = Path(__UpperCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(__UpperCamelCase , submodule_path / module_file ) for module_needed in modules_needed: __A = f'{module_needed}.py' shutil.copy(os.path.join(__UpperCamelCase , __UpperCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(__UpperCamelCase , __UpperCamelCase ): __A = use_auth_token elif use_auth_token is True: __A = HfFolder.get_token() else: __A = None __A = model_info(__UpperCamelCase , revision=__UpperCamelCase , token=__UpperCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. __A = submodule_path / commit_hash __A = full_submodule + os.path.sep + commit_hash create_dynamic_module(__UpperCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(__UpperCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( __UpperCamelCase , f'{module_needed}.py' , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return os.path.join(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , **__UpperCamelCase , ): """simple docstring""" __A = get_cached_module_file( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return get_class_in_module(__UpperCamelCase , final_module.replace('''.py''' , '''''' ) )
266
1
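The loader above fetches a community pipeline file, scans it for relative imports so the dependent files can be fetched too, and copies everything into a cached dynamic-modules package. A minimal sketch of just the relative-import scan might look like:

import re


def get_relative_imports(module_file: str) -> list:
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(imports))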
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
266
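The record above is the classic opposite-sign check: two integers have different signs exactly when their XOR is negative, because the result's sign bit is set only when the operands' sign bits differ. (In the obfuscated form both operands collapsed to the same name, which would always XOR to zero; the sketch below, with illustrative names, shows the intended check.)

def different_signs(num1: int, num2: int) -> bool:
    # The XOR of two integers is negative exactly when their sign bits differ.
    return (num1 ^ num2) < 0


assert different_signs(3, -5)
assert not different_signs(-3, -5)
assert not different_signs(3, 5)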
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ): __A = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sgugger/tiny-distilbert-classification''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) # set architectures equal to `None` __A = None __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], 
training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, save_to_csv=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ), 
inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ), env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ), multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ): self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ), log_print=_lowerCamelCase, trace_memory_line_by_line=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
266
1
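The tests above repeatedly build PyTorchBenchmarkArguments and run PyTorchBenchmark over a tiny model to check that the timing and memory results are populated. A minimal standalone invocation, assuming a transformers version that still ships the benchmark utilities, might look like:

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = PyTorchBenchmark(args)
results = benchmark.run()
print(results.time_inference_result)    # seconds per forward pass, keyed by batch size / sequence length
print(results.memory_inference_result)  # peak memory, keyed the same way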
"""simple docstring""" import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = [] if isinstance(__UpperCamelCase , __UpperCamelCase ): for v in tree.values(): shapes.extend(_fetch_dims(__UpperCamelCase ) ) elif isinstance(__UpperCamelCase , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(__UpperCamelCase ) ) elif isinstance(__UpperCamelCase , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('''Not supported''' ) return shapes @torch.jit.ignore def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = [] for d in reversed(__UpperCamelCase ): idx.append(flat_idx % d ) __A = flat_idx // d return tuple(reversed(__UpperCamelCase ) ) @torch.jit.ignore def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , ): """simple docstring""" def reduce_edge_list(__UpperCamelCase ) -> None: __A = True for i in range(len(__UpperCamelCase ) ): __A = -1 * (i + 1) l[reversed_idx] &= tally __A = l[reversed_idx] if start_edges is None: __A = [s == 0 for s in start] reduce_edge_list(__UpperCamelCase ) if end_edges is None: __A = [e == (d - 1) for e, d in zip(__UpperCamelCase , __UpperCamelCase )] reduce_edge_list(__UpperCamelCase ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(__UpperCamelCase ) == 0: return [()] elif len(__UpperCamelCase ) == 1: return [(slice(start[0] , end[0] + 1 ),)] __A = [] __A = [] # Dimensions common to start and end can be selected directly for s, e in zip(__UpperCamelCase , __UpperCamelCase ): if s == e: path_list.append(slice(__UpperCamelCase , s + 1 ) ) else: break __A = tuple(__UpperCamelCase ) __A = len(__UpperCamelCase ) # start == end, and we're done if divergence_idx == len(__UpperCamelCase ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None __A = start[divergence_idx] return tuple( path + (slice(__UpperCamelCase , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None __A = end[divergence_idx] return tuple( path + (slice(__UpperCamelCase , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is 
ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) __A = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = t.shape[:no_batch_dims] __A = list(_flat_idx_to_idx(__UpperCamelCase , __UpperCamelCase ) ) # _get_minimal_slice_set is inclusive __A = list(_flat_idx_to_idx(flat_end - 1 , __UpperCamelCase ) ) # Get an ordered list of slices to perform __A = _get_minimal_slice_set( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) __A = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , ): """simple docstring""" if not (len(__UpperCamelCase ) > 0): raise ValueError('''Must provide at least one input''' ) __A = [shape[:no_batch_dims] for shape in _fetch_dims(__UpperCamelCase )] __A = tuple([max(__UpperCamelCase ) for s in zip(*__UpperCamelCase )] ) def _prep_inputs(__UpperCamelCase ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: __A = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) __A = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: __A = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t __A = tensor_tree_map(_prep_inputs , __UpperCamelCase ) __A = None if _out is not None: __A = tensor_tree_map(lambda __UpperCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) __A = 1 for d in orig_batch_dims: flat_batch_dim *= d __A = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(__UpperCamelCase ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t __A = 0 __A = prepped_outputs for _ in range(__UpperCamelCase ): # Chunk the input if not low_mem: __A = _select_chunk else: __A = partial( _chunk_slice , flat_start=__UpperCamelCase , flat_end=min(__UpperCamelCase , i + chunk_size ) , no_batch_dims=len(__UpperCamelCase ) , ) __A = tensor_tree_map(__UpperCamelCase , __UpperCamelCase ) # Run the layer on the chunk __A = layer(**__UpperCamelCase ) # Allocate space for the output if out is None: __A = tensor_tree_map(lambda __UpperCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , __UpperCamelCase ) # Put the chunk in its pre-allocated space if isinstance(__UpperCamelCase , __UpperCamelCase ): def assign(__UpperCamelCase , __UpperCamelCase ) -> None: for k, v in da.items(): if isinstance(__UpperCamelCase , __UpperCamelCase ): assign(__UpperCamelCase , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: __A = da[k] assign(__UpperCamelCase , __UpperCamelCase ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): for xa, xa in zip(__UpperCamelCase , __UpperCamelCase ): if _add_into_out: xa[i : i + chunk_size] += xa else: __A = xa elif isinstance(__UpperCamelCase , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: __A = 
output_chunk else: raise ValueError('''Not supported''' ) i += chunk_size __A = tensor_tree_map(lambda __UpperCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , __UpperCamelCase ) return out class snake_case : '''simple docstring''' def __init__( self : int, _lowerCamelCase : int = 5_12, ): '''simple docstring''' __A = max_chunk_size __A = None __A = None def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Callable, _lowerCamelCase : tuple, _lowerCamelCase : int ): '''simple docstring''' logging.info('''Tuning chunk size...''' ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size __A = [2**l for l in range(int(math.log(self.max_chunk_size, 2 ) ) + 1 )] __A = [c for c in candidates if c > min_chunk_size] __A = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(_lowerCamelCase : int ) -> bool: try: with torch.no_grad(): fn(*_lowerCamelCase, chunk_size=_lowerCamelCase ) return True except RuntimeError: return False __A = 0 __A = len(_lowerCamelCase ) - 1 while i > min_viable_chunk_size_index: __A = test_chunk_size(candidates[i] ) if not viable: __A = (min_viable_chunk_size_index + i) // 2 else: __A = i __A = (i + len(_lowerCamelCase ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Iterable, _lowerCamelCase : Iterable ): '''simple docstring''' __A = True for aa, aa in zip(_lowerCamelCase, _lowerCamelCase ): assert type(_lowerCamelCase ) == type(_lowerCamelCase ) if isinstance(_lowerCamelCase, (list, tuple) ): consistent &= self._compare_arg_caches(_lowerCamelCase, _lowerCamelCase ) elif isinstance(_lowerCamelCase, _lowerCamelCase ): __A = [v for _, v in sorted(aa.items(), key=lambda _lowerCamelCase : x[0] )] __A = [v for _, v in sorted(aa.items(), key=lambda _lowerCamelCase : x[0] )] consistent &= self._compare_arg_caches(_lowerCamelCase, _lowerCamelCase ) else: consistent &= aa == aa return consistent def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Callable, _lowerCamelCase : tuple, _lowerCamelCase : int, ): '''simple docstring''' __A = True __A = tree_map(lambda _lowerCamelCase : a.shape if isinstance(_lowerCamelCase, torch.Tensor ) else a, _lowerCamelCase, _lowerCamelCase ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(_lowerCamelCase ) __A = self._compare_arg_caches(self.cached_arg_data, _lowerCamelCase ) else: # Otherwise, we can reuse the precomputed value __A = False if not consistent: __A = self._determine_favorable_chunk_size( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, ) __A = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
266
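The utilities above flatten the batch dimensions of every input, run a layer over fixed-size chunks of that flattened batch, and scatter the chunk outputs back into a pre-allocated result, trading some overhead for a lower peak memory footprint. A toy sketch of the chunking idea for a single tensor input might look like:

import torch


def chunk_apply(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    # Run `fn` over slices of the leading (batch) dimension and concatenate the results,
    # so only one chunk's activations are alive at a time.
    outputs = []
    for start in range(0, x.shape[0], chunk_size):
        outputs.append(fn(x[start : start + chunk_size]))
    return torch.cat(outputs, dim=0)


layer = torch.nn.Linear(16, 4)
x = torch.randn(1000, 16)
with torch.no_grad():
    y = chunk_apply(layer, x, chunk_size=128)
assert y.shape == (1000, 4)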
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = PegasusTokenizer A_ : int = PegasusTokenizerFast A_ : Optional[Any] = True A_ : Union[str, Any] = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def _SCREAMING_SNAKE_CASE ( self : int, **_lowerCamelCase : List[Any] ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = '''</s>''' __A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ), _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<pad>''' ) self.assertEqual(vocab_keys[1], '''</s>''' ) self.assertEqual(vocab_keys[-1], '''v''' ) self.assertEqual(len(_lowerCamelCase ), 11_03 ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 11_03 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word __A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' __A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_61_03 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_03 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05 assert tokenizer.unk_token == 
"<unk>" assert tokenizer.model_max_length == 10_24 __A = '''To ensure a smooth flow of bank resolutions.''' __A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 1_50, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 10_24) assert batch.attention_mask.shape == (2, 10_24) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # fmt: off __A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase, model_name='''google/bigbird-pegasus-large-arxiv''', revision='''ba85d0851d708441f91440d509690f1ab6353415''', ) @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , 
unittest.TestCase ): '''simple docstring''' A_ : str = PegasusTokenizer A_ : Union[str, Any] = PegasusTokenizerFast A_ : Any = True A_ : str = True def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase, offset=0, mask_token_sent=_lowerCamelCase, mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], **_lowerCamelCase : Dict ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 10_00, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 40_96) assert batch.attention_mask.shape == (2, 40_96) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) __A = self._large_tokenizer(_lowerCamelCase ).input_ids self.assertListEqual( _lowerCamelCase, [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1], )
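The collapsed test module above exercises PegasusTokenizer and PegasusTokenizerFast (vocabulary layout, special-token offsets, padding and truncation to the model length). For reference, a minimal usage sketch follows; it assumes the transformers and sentencepiece packages are installed and that the google/pegasus-large checkpoint can be downloaded, which is not part of the row itself.

# Hedged sketch: encode and decode one sentence with the Pegasus tokenizer.
# Assumes network access to the google/pegasus-large checkpoint.
from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")

text = "To ensure a smooth flow of bank resolutions."
ids = tokenizer(text).input_ids            # ends with the </s> id (1); the pad id is 0
print(ids)
print(tokenizer.decode(ids, skip_special_tokens=True))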
266
1
"""simple docstring""" from string import ascii_uppercase lowercase_ = {char: i for i, char in enumerate(ascii_uppercase)} lowercase_ = dict(enumerate(ascii_uppercase)) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = len(__UpperCamelCase ) __A = 0 while True: if x == i: __A = 0 if len(__UpperCamelCase ) == len(__UpperCamelCase ): break key += key[i] i += 1 return key def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = '''''' __A = 0 for letter in message: if letter == " ": cipher_text += " " else: __A = (dicta[letter] - dicta[key_new[i]]) % 2_6 i += 1 cipher_text += dicta[x] return cipher_text def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = '''''' __A = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: __A = (dicta[letter] + dicta[key_new[i]] + 2_6) % 2_6 i += 1 or_txt += dicta[x] return or_txt def lowerCAmelCase ( ): """simple docstring""" __A = '''THE GERMAN ATTACK''' __A = '''SECRET''' __A = generate_key(__UpperCamelCase , __UpperCamelCase ) __A = cipher_text(__UpperCamelCase , __UpperCamelCase ) print(f'Encrypted Text = {s}' ) print(f'Original Text = {original_text(__UpperCamelCase , __UpperCamelCase )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
266
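The cipher module above extends the key to the length of the message, then encrypts with (plaintext - key) mod 26 and decrypts with (ciphertext + key) mod 26, passing spaces through unchanged. A minimal, de-obfuscated sketch of the same scheme (illustrative only; uppercase input is assumed):

# Repeating-key cipher sketch: encrypt = (plain - key) mod 26, decrypt = (cipher + key) mod 26.
from string import ascii_uppercase

CHAR_TO_IDX = {c: i for i, c in enumerate(ascii_uppercase)}
IDX_TO_CHAR = dict(enumerate(ascii_uppercase))


def extend_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    out, i = key, 0
    while len(out) < len(message):
        out += key[i % len(key)]
        i += 1
    return out[: len(message)]


def encrypt(message: str, key: str) -> str:
    key = extend_key(message, key)
    out, k = "", 0
    for ch in message:
        if ch == " ":
            out += " "
        else:
            out += IDX_TO_CHAR[(CHAR_TO_IDX[ch] - CHAR_TO_IDX[key[k]]) % 26]
            k += 1
    return out


def decrypt(cipher: str, key: str) -> str:
    key = extend_key(cipher, key)
    out, k = "", 0
    for ch in cipher:
        if ch == " ":
            out += " "
        else:
            out += IDX_TO_CHAR[(CHAR_TO_IDX[ch] + CHAR_TO_IDX[key[k]]) % 26]
            k += 1
    return out


print(decrypt(encrypt("THE GERMAN ATTACK", "SECRET"), "SECRET"))  # THE GERMAN ATTACK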
"""simple docstring""" import re def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )] def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = split_input(str_ ) return "".join( [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" try: __A = split_input(__UpperCamelCase ) if upper: __A = ''''''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __A = ''''''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return to_simple_case(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" try: __A = to_simple_case(__UpperCamelCase ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''_''' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''-''' ) if __name__ == "__main__": __import__('doctest').testmod()
266
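The helpers above split the input on non-alphanumeric characters and rebuild the words as Pascal, camel, snake, or kebab case. A compact sketch of the same conversions, written as an illustration rather than a copy of the row:

# Case-conversion sketch built on one word-splitting helper.
import re


def words(text: str) -> list[str]:
    """Split on anything that is not a letter, digit, or space, then on spaces."""
    return [w for part in re.split(r"[^ a-zA-Z0-9\s]", text) for w in part.split()]


def to_pascal_case(text: str) -> str:
    return "".join(w.capitalize() for w in words(text))


def to_camel_case(text: str) -> str:
    pascal = to_pascal_case(text)
    return pascal[0].lower() + pascal[1:] if pascal else pascal


def to_snake_case(text: str, upper: bool = False) -> str:
    return "_".join(w.upper() if upper else w.lower() for w in words(text))


def to_kebab_case(text: str, upper: bool = False) -> str:
    return "-".join(w.upper() if upper else w.lower() for w in words(text))


print(to_snake_case("one two 31235three4four"))  # one_two_31235three4four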
1
"""simple docstring""" import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = DownBlockaD # noqa F405 A_ : Optional[int] = "down" def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = ResnetDownsampleBlockaD # noqa F405 A_ : List[str] = "down" def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Any = AttnDownBlockaD # noqa F405 A_ : List[Any] = "down" def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : List[str] = CrossAttnDownBlockaD # noqa F405 A_ : Dict = "down" def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A , __A = super().prepare_init_args_and_inputs_for_common() __A = 32 return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = SimpleCrossAttnDownBlockaD # noqa F405 A_ : List[Any] = "down" @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A , __A = super().prepare_init_args_and_inputs_for_common() __A = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == '''mps''', '''MPS result is not consistent''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Tuple = SkipDownBlockaD # noqa F405 A_ : Dict = "down" @property def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = AttnSkipDownBlockaD # noqa F405 A_ : Dict = "down" @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , 
unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = DownEncoderBlockaD # noqa F405 A_ : Dict = "down" @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' return super().get_dummy_input(include_temb=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = { '''in_channels''': 32, '''out_channels''': 32, } __A = self.dummy_input return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = AttnDownEncoderBlockaD # noqa F405 A_ : Optional[int] = "down" @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' return super().get_dummy_input(include_temb=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = { '''in_channels''': 32, '''out_channels''': 32, } __A = self.dummy_input return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : List[str] = UNetMidBlockaD # noqa F405 A_ : Tuple = "mid" def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = { '''in_channels''': 32, '''temb_channels''': 1_28, } __A = self.dummy_input return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = UNetMidBlockaDCrossAttn # noqa F405 A_ : str = "mid" def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A , __A = super().prepare_init_args_and_inputs_for_common() __A = 32 return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = UNetMidBlockaDSimpleCrossAttn # noqa F405 A_ : Union[str, Any] = "mid" @property def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A , __A = super().prepare_init_args_and_inputs_for_common() __A = 32 return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = UpBlockaD # noqa F405 A_ : Tuple = "up" @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , 
unittest.TestCase ): '''simple docstring''' A_ : Any = ResnetUpsampleBlockaD # noqa F405 A_ : Any = "up" @property def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = CrossAttnUpBlockaD # noqa F405 A_ : Optional[Any] = "up" @property def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A , __A = super().prepare_init_args_and_inputs_for_common() __A = 32 return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = SimpleCrossAttnUpBlockaD # noqa F405 A_ : Any = "up" @property def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase, include_encoder_hidden_states=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A , __A = super().prepare_init_args_and_inputs_for_common() __A = 32 return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = AttnUpBlockaD # noqa F405 A_ : Optional[int] = "up" @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) @unittest.skipIf(torch_device == '''mps''', '''MPS result is not consistent''' ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = SkipUpBlockaD # noqa F405 A_ : int = "up" @property def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = AttnSkipUpBlockaD # noqa F405 A_ : Any = "up" @property def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : 
Tuple = UpDecoderBlockaD # noqa F405 A_ : int = "up" @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return super().get_dummy_input(include_temb=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = {'''in_channels''': 32, '''out_channels''': 32} __A = self.dummy_input return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37] super().test_output(_lowerCamelCase ) class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Tuple = AttnUpDecoderBlockaD # noqa F405 A_ : Optional[Any] = "up" @property def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return super().get_dummy_input(include_temb=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = {'''in_channels''': 32, '''out_channels''': 32} __A = self.dummy_input return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68] super().test_output(_lowerCamelCase )
266
"""simple docstring""" from __future__ import annotations class snake_case : '''simple docstring''' def __init__( self : int, _lowerCamelCase : List[Any]=None ): '''simple docstring''' __A = data __A = None def __repr__( self : Union[str, Any] ): '''simple docstring''' __A = [] __A = self while temp: string_rep.append(f'{temp.data}' ) __A = temp.next return "->".join(_lowerCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not elements_list: raise Exception('''The Elements List is empty''' ) __A = __A = Node(elements_list[0] ) for i in range(1 , len(__UpperCamelCase ) ): __A = Node(elements_list[i] ) __A = current.next return head def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def lowerCAmelCase ( ): """simple docstring""" from doctest import testmod testmod() __A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print('''Linked List:''' ) print(__UpperCamelCase ) print('''Elements in Reverse:''' ) print_reverse(__UpperCamelCase ) if __name__ == "__main__": main()
266
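The module above builds a singly linked list and prints it in reverse by recursing all the way to the tail before printing. A minimal equivalent sketch:

# Linked-list sketch: build from a list of values, print in reverse recursively.
from __future__ import annotations
from dataclasses import dataclass


@dataclass
class Node:
    data: int
    next: Node | None = None


def make_linked_list(values: list[int]) -> Node:
    if not values:
        raise ValueError("The elements list is empty")
    head = current = Node(values[0])
    for value in values[1:]:
        current.next = Node(value)
        current = current.next
    return head


def print_reverse(node: Node | None) -> None:
    if node is not None:
        print_reverse(node.next)   # reach the tail first ...
        print(node.data)           # ... then print on the way back


print_reverse(make_linked_list([14, 52, 14, 12, 43]))  # 43 12 14 52 14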
1
"""simple docstring""" class snake_case : '''simple docstring''' def __init__( self : Dict, _lowerCamelCase : list ): '''simple docstring''' __A = set_counts __A = max(_lowerCamelCase ) __A = len(_lowerCamelCase ) __A = [1] * num_sets __A = list(range(_lowerCamelCase ) ) def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' __A = self.get_parent(_lowerCamelCase ) __A = self.get_parent(_lowerCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] __A = 0 __A = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 __A = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] __A = 0 __A = src_parent __A = self.set_counts[src_parent] __A = max(self.max_set, _lowerCamelCase ) return True def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : int ): '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set __A = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
266
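The class above is a disjoint-set (union-find) structure: it unions by rank, keeps each root's component size, and tracks the largest component seen so far. A readable sketch of the same idea with explicit path compression:

# Union-find sketch: union by rank, path compression, running maximum component size.
class DisjointSet:
    def __init__(self, set_counts: list[int]) -> None:
        self.set_counts = list(set_counts)             # size stored at each root
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))
        self.max_set = max(set_counts)

    def find(self, x: int) -> int:
        if self.parents[x] != x:
            self.parents[x] = self.find(self.parents[x])   # path compression
        return self.parents[x]

    def merge(self, src: int, dst: int) -> bool:
        src, dst = self.find(src), self.find(dst)
        if src == dst:
            return False
        if self.ranks[src] > self.ranks[dst]:              # always attach the lower rank
            src, dst = dst, src
        self.set_counts[dst] += self.set_counts[src]
        self.set_counts[src] = 0
        self.parents[src] = dst
        if self.ranks[src] == self.ranks[dst]:
            self.ranks[dst] += 1
        self.max_set = max(self.max_set, self.set_counts[dst])
        return True


ds = DisjointSet([1, 1, 1, 1])
ds.merge(0, 1)
ds.merge(2, 3)
ds.merge(0, 3)
print(ds.max_set)  # 4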
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : int = ["input_features", "attention_mask"] def __init__( self : Optional[Any], _lowerCamelCase : Union[str, Any]=80, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Any=80, _lowerCamelCase : List[str]=0.0, _lowerCamelCase : int=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]=True, **_lowerCamelCase : List[str], ): '''simple docstring''' super().__init__(feature_size=_lowerCamelCase, sampling_rate=_lowerCamelCase, padding_value=_lowerCamelCase, **_lowerCamelCase ) __A = num_mel_bins __A = do_ceptral_normalize __A = normalize_means __A = normalize_vars __A = True def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : np.ndarray, ): '''simple docstring''' __A = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __A = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 ) __A = ta_kaldi.fbank(_lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray, _lowerCamelCase : int, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : float = 0.0, ): '''simple docstring''' # make sure we normalize float32 arrays if normalize_means: __A = x[:input_length].mean(axis=0 ) __A = np.subtract(_lowerCamelCase, _lowerCamelCase ) if normalize_vars: __A = x[:input_length].std(axis=0 ) __A = np.divide(_lowerCamelCase, _lowerCamelCase ) if input_length < x.shape[0]: __A = padding_value # make sure array is in float32 __A = x.astype(np.floataa ) return x def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[np.ndarray], _lowerCamelCase : Optional[np.ndarray] = None ): '''simple docstring''' __A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_lowerCamelCase, _lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value ) for x, n in zip(_lowerCamelCase, _lowerCamelCase ) ] def __call__( self : Optional[Any], _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _lowerCamelCase : Union[bool, str, PaddingStrategy] = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : bool = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[bool] = None, **_lowerCamelCase : Optional[Any], ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __A = isinstance(_lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __A = is_batched_numpy or ( isinstance(_lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCamelCase, np.ndarray ): __A = np.asarray(_lowerCamelCase, dtype=np.floataa ) elif isinstance(_lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __A = [raw_speech] # extract fbank features __A = [self._extract_fbank_features(_lowerCamelCase ) for waveform in raw_speech] # convert into correct format for padding __A = BatchFeature({'''input_features''': features} ) __A = self.pad( _lowerCamelCase, padding=_lowerCamelCase, max_length=_lowerCamelCase, truncation=_lowerCamelCase, pad_to_multiple_of=_lowerCamelCase, return_attention_mask=_lowerCamelCase, **_lowerCamelCase, ) # make sure list is in array format __A = padded_inputs.get('''input_features''' ) if isinstance(input_features[0], _lowerCamelCase ): __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for feature in input_features] __A = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: __A = [np.asarray(_lowerCamelCase, dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __A = ( np.array(_lowerCamelCase, dtype=np.intaa ) if self._get_padding_strategies(_lowerCamelCase, max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __A = self.normalize( padded_inputs['''input_features'''], attention_mask=_lowerCamelCase ) if return_tensors is not None: __A = padded_inputs.convert_to_tensors(_lowerCamelCase ) return padded_inputs
266
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowercase_ = { 'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ['MobileViTFeatureExtractor'] lowercase_ = ['MobileViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileViTForImageClassification', 'MobileViTForSemanticSegmentation', 'MobileViTModel', 'MobileViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileViTForImageClassification', 'TFMobileViTForSemanticSegmentation', 'TFMobileViTModel', 'TFMobileViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
266
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=13, _lowerCamelCase : Any=3, _lowerCamelCase : Optional[int]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Any=[0.5, 0.5, 0.5], _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = do_normalize __A = image_mean __A = image_std def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = ViTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = EfficientFormerImageProcessorTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''image_std''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_proc_tester, 
equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), )
266
1
"""simple docstring""" from __future__ import annotations import math def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if num <= 0: __A = f'{num}: Invalid input, please enter a positive integer.' raise ValueError(__UpperCamelCase ) __A = [True] * (num + 1) __A = [] __A = 2 __A = int(math.sqrt(__UpperCamelCase ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(__UpperCamelCase ) # Set multiples of start be False for i in range(start * start , num + 1 , __UpperCamelCase ): if sieve[i] is True: __A = False start += 1 for j in range(end + 1 , num + 1 ): if sieve[j] is True: prime.append(__UpperCamelCase ) return prime if __name__ == "__main__": print(prime_sieve(int(input('Enter a positive integer: ').strip())))
266
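The function above is a sieve of Eratosthenes: each prime's multiples are struck out starting from its square, and every index that survives is prime. A compact sketch with the same behaviour:

# Sieve of Eratosthenes sketch.
def prime_sieve(limit: int) -> list[int]:
    if limit <= 0:
        raise ValueError(f"{limit}: invalid input, please enter a positive integer.")
    is_prime = [True] * (limit + 1)
    is_prime[0:2] = [False, False]                  # 0 and 1 are not prime
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
    return [n for n, flag in enumerate(is_prime) if flag]


print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]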
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int], *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : Dict ): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''', _lowerCamelCase, ) super().__init__(*_lowerCamelCase, **_lowerCamelCase )
266
1
"""simple docstring""" from math import factorial def lowerCAmelCase ( __UpperCamelCase = 2_0 ): """simple docstring""" __A = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... __A = n // 2 return int(factorial(__UpperCamelCase ) / (factorial(__UpperCamelCase ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: lowercase_ = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number.')
266
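The solution above computes the central binomial coefficient C(2n, n) = (2n)! / (n!)^2, the middle entry of row 2n of Pascal's triangle. The same value falls straight out of math.comb; a one-function sketch:

# Central binomial coefficient via math.comb (Python 3.8+).
from math import comb


def central_binomial(n: int = 20) -> int:
    return comb(2 * n, n)


print(central_binomial(20))  # 137846528820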
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : int=3, _lowerCamelCase : Optional[Any]=18, _lowerCamelCase : Any=30, _lowerCamelCase : str=4_00, _lowerCamelCase : int=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=True, ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = apply_ocr def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = LayoutLMvaImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''apply_ocr''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} ) __A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) self.assertIsInstance(encoding.words, _lowerCamelCase ) self.assertIsInstance(encoding.boxes, _lowerCamelCase ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def 
_SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' # with apply_OCR = True __A = LayoutLMvaImageProcessor() from datasets import load_dataset __A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' ) __A = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ), len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', 
'''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 
4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, _lowerCamelCase ) self.assertListEqual(encoding.boxes, _lowerCamelCase ) # with apply_OCR = False __A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
266
1
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(__UpperCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix __A = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creates a copy of the matrix with swapped positions of the elements __A = [[0.0, 0.0], [0.0, 0.0]] __A , __A = matrix[1][1], matrix[0][0] __A , __A = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(__UpperCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(__UpperCamelCase ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule __A = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creating cofactor matrix __A = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] __A = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) __A = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) __A = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) __A = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) __A = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) __A = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) __A = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) __A = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) __A = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) __A = array(__UpperCamelCase ) for i in range(3 ): for j in range(3 ): __A = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix __A = array(__UpperCamelCase ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(__UpperCamelCase ) # Calculate the inverse of the matrix return [[float(d(__UpperCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
266
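The function above inverts 2x2 matrices from the determinant and 3x3 matrices from the adjugate (transposed cofactor matrix), using Decimal to limit rounding error. For the 2x2 case the whole computation collapses to a four-entry formula; a minimal sketch:

# 2x2 inverse sketch: inverse = adjugate / determinant.
def inverse_2x2(m: list[list[float]]) -> list[list[float]]:
    (a, b), (c, d) = m
    det = a * d - b * c
    if det == 0:
        raise ValueError("This matrix has no inverse.")
    return [[d / det, -b / det], [-c / det, a / det]]


print(inverse_2x2([[4.0, 7.0], [2.0, 6.0]]))  # [[0.6, -0.7], [-0.2, 0.4]]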
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class snake_case ( ctypes.Structure ): '''simple docstring''' A_ : List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = False ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25l''' ) sys.stdout.flush() def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = True ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25h''' ) sys.stdout.flush() @contextmanager def lowerCAmelCase ( ): """simple docstring""" try: hide_cursor() yield finally: show_cursor()
266
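On POSIX terminals the module above hides and restores the cursor with the ANSI sequences ESC[?25l and ESC[?25h; on Windows it goes through the console API via ctypes. A sketch of the POSIX branch only, wrapped so the cursor is always restored (illustrative; the call inside the with block is a placeholder, not a real function):

# Hide the terminal cursor for the duration of a block (POSIX/ANSI terminals only).
import sys
from contextlib import contextmanager


@contextmanager
def hidden_cursor():
    sys.stdout.write("\033[?25l")   # hide
    sys.stdout.flush()
    try:
        yield
    finally:
        sys.stdout.write("\033[?25h")   # always restore
        sys.stdout.flush()


# with hidden_cursor():
#     run_long_task()   # placeholder for whatever work should run cursor-free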
1
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCAmelCase ( ): """simple docstring""" __A = 2 while True: if is_prime(__UpperCamelCase ): yield num num += 1 def lowerCAmelCase ( __UpperCamelCase = 2_0_0_0_0_0_0 ): """simple docstring""" return sum(takewhile(lambda __UpperCamelCase : x < n , prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
266
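The solution above sums all primes below two million by trial division over a prime generator. A sieve-based sketch of the same computation, shown only as an alternative approach that is much faster at this bound:

# Sum of primes below n via a byte sieve.
def sum_of_primes_below(n: int = 2_000_000) -> int:
    sieve = bytearray([1]) * n
    sieve[0:2] = b"\x00\x00"                       # 0 and 1 are not prime
    for p in range(2, int(n ** 0.5) + 1):
        if sieve[p]:
            sieve[p * p :: p] = bytearray(len(sieve[p * p :: p]))
    return sum(i for i, flag in enumerate(sieve) if flag)


print(sum_of_primes_below(10))   # 17 (2 + 3 + 5 + 7)
# print(sum_of_primes_below())   # 142913828922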
"""simple docstring""" import argparse import struct import unittest class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : bytes ): '''simple docstring''' __A = data # Initialize hash values __A = [ 0X6a_09e_667, 0Xbb_67a_e85, 0X3c_6ef_372, 0Xa5_4ff_53a, 0X51_0e5_27f, 0X9b_056_88c, 0X1f_83d_9ab, 0X5b_e0c_d19, ] # Initialize round constants __A = [ 0X42_8a2_f98, 0X71_374_491, 0Xb5_c0f_bcf, 0Xe9_b5d_ba5, 0X39_56c_25b, 0X59_f11_1f1, 0X92_3f8_2a4, 0Xab_1c5_ed5, 0Xd8_07a_a98, 0X12_835_b01, 0X24_318_5be, 0X55_0c7_dc3, 0X72_be5_d74, 0X80_deb_1fe, 0X9b_dc0_6a7, 0Xc1_9bf_174, 0Xe4_9b6_9c1, 0Xef_be4_786, 0X0f_c19_dc6, 0X24_0ca_1cc, 0X2d_e92_c6f, 0X4a_748_4aa, 0X5c_b0a_9dc, 0X76_f98_8da, 0X98_3e5_152, 0Xa8_31c_66d, 0Xb0_032_7c8, 0Xbf_597_fc7, 0Xc6_e00_bf3, 0Xd5_a79_147, 0X06_ca6_351, 0X14_292_967, 0X27_b70_a85, 0X2e_1b2_138, 0X4d_2c6_dfc, 0X53_380_d13, 0X65_0a7_354, 0X76_6a0_abb, 0X81_c2c_92e, 0X92_722_c85, 0Xa2_bfe_8a1, 0Xa8_1a6_64b, 0Xc2_4b8_b70, 0Xc7_6c5_1a3, 0Xd1_92e_819, 0Xd6_990_624, 0Xf4_0e3_585, 0X10_6aa_070, 0X19_a4c_116, 0X1e_376_c08, 0X27_487_74c, 0X34_b0b_cb5, 0X39_1c0_cb3, 0X4e_d8a_a4a, 0X5b_9cc_a4f, 0X68_2e6_ff3, 0X74_8f8_2ee, 0X78_a56_36f, 0X84_c87_814, 0X8c_c70_208, 0X90_bef_ffa, 0Xa4_506_ceb, 0Xbe_f9a_3f7, 0Xc6_717_8f2, ] __A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : bytes ): '''simple docstring''' __A = b'''\x80''' + (b'''\x00''' * (63 - (len(_lowerCamelCase ) + 8) % 64)) __A = struct.pack('''>Q''', (len(_lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' # Convert into blocks of 64 bytes __A = [ self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data ), 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __A = list(struct.unpack('''>16L''', _lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __A , __A , __A , __A , __A , __A , __A , __A = self.hashes for index in range(0, 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __A = ( self.ror(words[index - 15], 7 ) ^ self.ror(words[index - 15], 18 ) ^ (words[index - 15] >> 3) ) __A = ( self.ror(words[index - 2], 17 ) ^ self.ror(words[index - 2], 19 ) ^ (words[index - 2] >> 10) ) __A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression __A = self.ror(_lowerCamelCase, 6 ) ^ self.ror(_lowerCamelCase, 11 ) ^ self.ror(_lowerCamelCase, 25 ) __A = (e & f) ^ ((~e & 0Xff_fff_fff) & g) __A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 __A = self.ror(_lowerCamelCase, 2 ) ^ self.ror(_lowerCamelCase, 13 ) ^ self.ror(_lowerCamelCase, 22 ) __A = (a & b) ^ (a & c) ^ (b & c) __A = (sa + maj) % 0X100_000_000 __A , __A , __A , __A , __A , __A , __A , __A = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) __A = [a, b, c, d, e, f, g, h] # Modify final values __A = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes ) ] __A = ''''''.join([hex(_lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' return 0Xff_fff_fff & (value << (32 - rotations)) | (value >> rotations) class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' import 
hashlib __A = bytes('''Test String''', '''utf-8''' ) self.assertEqual(SHAaaa(_lowerCamelCase ).hash, hashlib.shaaaa(_lowerCamelCase ).hexdigest() ) def lowerCAmelCase ( ): """simple docstring""" import doctest doctest.testmod() __A = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) __A = parser.parse_args() __A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: __A = f.read() else: __A = bytes(__UpperCamelCase , '''utf-8''' ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
266
1
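# The SHA-256 sample above preprocesses the message by appending a 0x80 byte, zero padding,
# and the original bit length as a big-endian 64-bit integer, so the input fills whole
# 64-byte blocks. A minimal, self-contained sketch of just that padding step follows; it is
# an illustration only, and the helper name `sha256_pad` is made up here, not from the row above.
import struct


def sha256_pad(data: bytes) -> bytes:
    # one 0x80 byte, zeros up to 56 mod 64, then the bit length as a big-endian u64
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    big_endian_length = struct.pack(">Q", len(data) * 8)
    return data + padding + big_endian_length


padded = sha256_pad(b"Test String")
assert len(padded) % 64 == 0   # the padded message always fills whole 512-bit blocks
print(len(padded) // 64)       # 1 block for an 11-byte input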
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available lowercase_ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
266
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets lowercase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' lowercase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' lowercase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ], ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str="auto", _lowerCamelCase : Union[str, Any]=-1, _lowerCamelCase : List[str]=0.9, _lowerCamelCase : int=5, _lowerCamelCase : Tuple=5_00, _lowerCamelCase : Union[str, Any]="gpt2-large", _lowerCamelCase : int=-1, _lowerCamelCase : Union[str, Any]=10_24, _lowerCamelCase : Union[str, Any]=25, _lowerCamelCase : str=5, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=25, ): '''simple docstring''' __A = compute_mauve( p_text=_lowerCamelCase, q_text=_lowerCamelCase, p_features=_lowerCamelCase, q_features=_lowerCamelCase, p_tokens=_lowerCamelCase, q_tokens=_lowerCamelCase, num_buckets=_lowerCamelCase, pca_max_data=_lowerCamelCase, kmeans_explained_var=_lowerCamelCase, kmeans_num_redo=_lowerCamelCase, kmeans_max_iter=_lowerCamelCase, featurize_model_name=_lowerCamelCase, device_id=_lowerCamelCase, max_text_length=_lowerCamelCase, divergence_curve_discretization_size=_lowerCamelCase, mauve_scaling_factor=_lowerCamelCase, verbose=_lowerCamelCase, seed=_lowerCamelCase, ) return out
266
1
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowercase_ = logging.get_logger(__name__) lowercase_ = Dict[str, Any] lowercase_ = List[Prediction] @add_end_docstrings(_lowerCAmelCase ) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict, *_lowerCamelCase : Any, **_lowerCamelCase : List[Any] ): '''simple docstring''' super().__init__(*_lowerCamelCase, **_lowerCamelCase ) if self.framework == "tf": raise ValueError(f'The {self.__class__} is only available in PyTorch.' ) requires_backends(self, '''vision''' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], **_lowerCamelCase : str ): '''simple docstring''' __A = {} if "threshold" in kwargs: __A = kwargs['''threshold'''] return {}, {}, postprocess_kwargs def __call__( self : List[str], *_lowerCamelCase : List[str], **_lowerCamelCase : Optional[int] ): '''simple docstring''' return super().__call__(*_lowerCamelCase, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Dict ): '''simple docstring''' __A = load_image(_lowerCamelCase ) __A = torch.IntTensor([[image.height, image.width]] ) __A = self.image_processor(images=[image], return_tensors='''pt''' ) if self.tokenizer is not None: __A = self.tokenizer(text=inputs['''words'''], boxes=inputs['''boxes'''], return_tensors='''pt''' ) __A = target_size return inputs def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str ): '''simple docstring''' __A = model_inputs.pop('''target_size''' ) __A = self.model(**_lowerCamelCase ) __A = outputs.__class__({'''target_size''': target_size, **outputs} ) if self.tokenizer is not None: __A = model_inputs['''bbox'''] return model_outputs def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any]=0.9 ): '''simple docstring''' __A = model_outputs['''target_size'''] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
__A , __A = target_size[0].tolist() def unnormalize(_lowerCamelCase : Tuple ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 10_00), (height * bbox[1] / 10_00), (width * bbox[2] / 10_00), (height * bbox[3] / 10_00), ] ) ) __A , __A = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) __A = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] __A = [unnormalize(_lowerCamelCase ) for bbox in model_outputs['''bbox'''].squeeze(0 )] __A = ['''score''', '''label''', '''box'''] __A = [dict(zip(_lowerCamelCase, _lowerCamelCase ) ) for vals in zip(scores.tolist(), _lowerCamelCase, _lowerCamelCase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel __A = self.image_processor.post_process_object_detection(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) __A = raw_annotations[0] __A = raw_annotation['''scores'''] __A = raw_annotation['''labels'''] __A = raw_annotation['''boxes'''] __A = scores.tolist() __A = [self.model.config.idalabel[label.item()] for label in labels] __A = [self._get_bounding_box(_lowerCamelCase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] __A = ['''score''', '''label''', '''box'''] __A = [ dict(zip(_lowerCamelCase, _lowerCamelCase ) ) for vals in zip(raw_annotation['''scores'''], raw_annotation['''labels'''], raw_annotation['''boxes'''] ) ] return annotation def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : "torch.Tensor" ): '''simple docstring''' if self.framework != "pt": raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' ) __A , __A , __A , __A = box.int().tolist() __A = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
266
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowercase_ = imread(R'digital_image_processing/image_data/lena_small.jpg') lowercase_ = cvtColor(img, COLOR_BGR2GRAY) def lowerCAmelCase ( ): """simple docstring""" __A = cn.convert_to_negative(__UpperCamelCase ) # assert negative_img array for at least one True assert negative_img.any() def lowerCAmelCase ( ): """simple docstring""" with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(__UpperCamelCase , 1_1_0 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def lowerCAmelCase ( ): """simple docstring""" __A = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCAmelCase ( ): """simple docstring""" __A = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() __A = canny.canny(__UpperCamelCase ) # assert canny array for at least one True assert canny_array.any() def lowerCAmelCase ( ): """simple docstring""" assert gg.gaussian_filter(__UpperCamelCase , 5 , sigma=0.9 ).all() def lowerCAmelCase ( ): """simple docstring""" __A = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __A = conv.img_convolve(__UpperCamelCase , __UpperCamelCase ).astype(__UpperCamelCase ) assert res.any() def lowerCAmelCase ( ): """simple docstring""" assert med.median_filter(__UpperCamelCase , 3 ).any() def lowerCAmelCase ( ): """simple docstring""" __A , __A = sob.sobel_filter(__UpperCamelCase ) assert grad.any() and theta.any() def lowerCAmelCase ( ): """simple docstring""" __A = sp.make_sepia(__UpperCamelCase , 2_0 ) assert sepia.all() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ): """simple docstring""" __A = bs.Burkes(imread(__UpperCamelCase , 1 ) , 1_2_0 ) burkes.process() assert burkes.output_img.any() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ): """simple docstring""" __A = rs.NearestNeighbour(imread(__UpperCamelCase , 1 ) , 4_0_0 , 2_0_0 ) nn.process() assert nn.output.any() def lowerCAmelCase ( ): """simple docstring""" __A = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. 
__A = imread(__UpperCamelCase , 0 ) # Test for get_neighbors_pixel function() return not None __A = 0 __A = 0 __A = image[x_coordinate][y_coordinate] __A = lbp.get_neighbors_pixel( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __A = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __A = lbp.local_binary_value(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert lbp_image.any()
266
1
"""simple docstring""" import torch from transformers import AutoModel class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : Tuple, _lowerCamelCase : Dict="sayef/fsner-bert-base-uncased" ): '''simple docstring''' super(_lowerCamelCase, self ).__init__() __A = AutoModel.from_pretrained(_lowerCamelCase, return_dict=_lowerCamelCase ) __A = torch.nn.CosineSimilarity(3, 1e-08 ) __A = torch.nn.Softmax(dim=1 ) def _SCREAMING_SNAKE_CASE ( self : Any, **_lowerCamelCase : int ): '''simple docstring''' return self.bert(**_lowerCamelCase ).last_hidden_state def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str ): '''simple docstring''' return token_embeddings.sum(2, keepdim=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : int=1 ): '''simple docstring''' return self.softmax(T * self.cos(_lowerCamelCase, _lowerCamelCase ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : List[str], _lowerCamelCase : int ): '''simple docstring''' __A = W_supports['''sizes'''].tolist() __A = W_supports['''start_token_id'''].item() __A = W_supports['''end_token_id'''].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] __A = self.BERT(**_lowerCamelCase ) __A = self.BERT(**_lowerCamelCase ) __A = None __A = None __A = W_supports['''input_ids'''] == start_token_id __A = W_supports['''input_ids'''] == end_token_id for i, size in enumerate(_lowerCamelCase ): if i == 0: __A = 0 else: __A = support_sizes[i - 1] __A = S[s : s + size][start_token_masks[s : s + size]] __A = S[s : s + size][end_token_masks[s : s + size]] __A = torch.matmul(q[i], s_start.T ).sum(1 ).softmax(0 ) __A = torch.matmul(q[i], s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: __A = torch.vstack((p_starts, p_start) ) __A = torch.vstack((p_ends, p_end) ) else: __A = p_start __A = p_end return p_starts, p_ends
266
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowercase_ = random.Random() if is_torch_available(): import torch def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None ): """simple docstring""" if rng is None: __A = global_rng __A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any, _lowerCamelCase : List[str], _lowerCamelCase : Any=7, _lowerCamelCase : Optional[int]=4_00, _lowerCamelCase : Optional[int]=20_00, _lowerCamelCase : Dict=1, _lowerCamelCase : Optional[Any]=0.0, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Dict=True, ): '''simple docstring''' __A = parent __A = batch_size __A = min_seq_length __A = max_seq_length __A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __A = feature_size __A = padding_value __A = sampling_rate __A = return_attention_mask __A = do_normalize def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[Any]=False, _lowerCamelCase : int=False ): '''simple docstring''' def _flatten(_lowerCamelCase : List[str] ): return list(itertools.chain(*_lowerCamelCase ) ) if equal_length: __A = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __A = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: __A = [np.asarray(_lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = ASTFeatureExtractor def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ASTFeatureExtractionTester(self ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' # Tests that all call wrap to encode_plus and batch_encode_plus __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __A = [floats_list((1, x) )[0] for x in range(8_00, 14_00, 2_00 )] __A = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __A = feat_extract(speech_inputs[0], return_tensors='''np''' ).input_values __A = feat_extract(np_speech_inputs[0], return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test batched __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test 
2-D numpy arrays are batched. __A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __A = np.asarray(_lowerCamelCase ) __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' import torch __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __A = np.random.rand(1_00 ).astype(np.floataa ) __A = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' from datasets import load_dataset __A = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech __A = ds.sort('''id''' ).select(range(_lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # fmt: off __A = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on __A = self._load_datasamples(1 ) __A = ASTFeatureExtractor() __A = feature_extractor(_lowerCamelCase, return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape, (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30], _lowerCamelCase, atol=1e-4 ) )
266
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase_ = logging.get_logger(__name__) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if isinstance(__UpperCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__UpperCamelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__UpperCamelCase ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Union[str, Any] = ["pixel_values"] def __init__( self : Optional[int], _lowerCamelCase : bool = True, _lowerCamelCase : Dict[str, int] = None, _lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR, _lowerCamelCase : bool = True, _lowerCamelCase : Dict[str, int] = None, _lowerCamelCase : bool = True, _lowerCamelCase : Union[int, float] = 1 / 2_55, _lowerCamelCase : bool = True, _lowerCamelCase : Optional[Union[float, List[float]]] = None, _lowerCamelCase : Optional[Union[float, List[float]]] = None, **_lowerCamelCase : List[str], ): '''simple docstring''' super().__init__(**_lowerCamelCase ) __A = size if size is not None else {'''shortest_edge''': 2_24} __A = get_size_dict(_lowerCamelCase, default_to_square=_lowerCamelCase ) __A = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __A = get_size_dict(_lowerCamelCase, param_name='''crop_size''' ) __A = do_resize __A = size __A = do_center_crop __A = crop_size __A = resample __A = do_rescale __A = rescale_factor __A = do_normalize __A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __A = image_std if image_std is not None else IMAGENET_STANDARD_STD def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : np.ndarray, _lowerCamelCase : Dict[str, int], _lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR, _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **_lowerCamelCase : int, ): '''simple docstring''' __A = get_size_dict(_lowerCamelCase, default_to_square=_lowerCamelCase ) if "shortest_edge" in size: __A = get_resize_output_image_size(_lowerCamelCase, size['''shortest_edge'''], default_to_square=_lowerCamelCase ) elif "height" in size and "width" in size: __A = (size['''height'''], size['''width''']) else: raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' ) return resize(_lowerCamelCase, size=_lowerCamelCase, resample=_lowerCamelCase, data_format=_lowerCamelCase, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : np.ndarray, _lowerCamelCase : Dict[str, int], _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **_lowerCamelCase : str, ): '''simple docstring''' __A = get_size_dict(_lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}' ) return center_crop(_lowerCamelCase, size=(size['''height'''], size['''width''']), data_format=_lowerCamelCase, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : np.ndarray, _lowerCamelCase : Union[int, float], _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **_lowerCamelCase : List[Any], ): '''simple docstring''' return rescale(_lowerCamelCase, scale=_lowerCamelCase, data_format=_lowerCamelCase, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : np.ndarray, _lowerCamelCase : Union[float, List[float]], _lowerCamelCase : Union[float, List[float]], _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **_lowerCamelCase : Union[str, Any], ): '''simple docstring''' return normalize(_lowerCamelCase, mean=_lowerCamelCase, std=_lowerCamelCase, data_format=_lowerCamelCase, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : ImageInput, _lowerCamelCase : bool = None, _lowerCamelCase : Dict[str, int] = None, _lowerCamelCase : PILImageResampling = None, _lowerCamelCase : bool = None, _lowerCamelCase : Dict[str, int] = None, _lowerCamelCase : bool = None, _lowerCamelCase : float = None, _lowerCamelCase : bool = None, _lowerCamelCase : Optional[Union[float, List[float]]] = None, _lowerCamelCase : Optional[Union[float, List[float]]] = None, _lowerCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST, ): '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__A = to_numpy_array(_lowerCamelCase ) if do_resize: __A = self.resize(image=_lowerCamelCase, size=_lowerCamelCase, resample=_lowerCamelCase ) if do_center_crop: __A = self.center_crop(_lowerCamelCase, size=_lowerCamelCase ) if do_rescale: __A = self.rescale(image=_lowerCamelCase, scale=_lowerCamelCase ) if do_normalize: __A = self.normalize(image=_lowerCamelCase, mean=_lowerCamelCase, std=_lowerCamelCase ) __A = to_channel_dimension_format(_lowerCamelCase, _lowerCamelCase ) return image def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : ImageInput, _lowerCamelCase : bool = None, _lowerCamelCase : Dict[str, int] = None, _lowerCamelCase : PILImageResampling = None, _lowerCamelCase : bool = None, _lowerCamelCase : Dict[str, int] = None, _lowerCamelCase : bool = None, _lowerCamelCase : float = None, _lowerCamelCase : bool = None, _lowerCamelCase : Optional[Union[float, List[float]]] = None, _lowerCamelCase : Optional[Union[float, List[float]]] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : ChannelDimension = ChannelDimension.FIRST, **_lowerCamelCase : Optional[Any], ): '''simple docstring''' __A = do_resize if do_resize is not None else self.do_resize __A = resample if resample is not None else self.resample __A = do_center_crop if do_center_crop is not None else self.do_center_crop __A = do_rescale if do_rescale is not None else self.do_rescale __A = rescale_factor if rescale_factor is not None else self.rescale_factor __A = do_normalize if do_normalize is not None else self.do_normalize __A = image_mean if image_mean is not None else self.image_mean __A = image_std if image_std is not None else self.image_std __A = size if size is not None else self.size __A = get_size_dict(_lowerCamelCase, default_to_square=_lowerCamelCase ) __A = crop_size if crop_size is not None else self.crop_size __A = get_size_dict(_lowerCamelCase, param_name='''crop_size''' ) if not valid_images(_lowerCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) __A = make_batched(_lowerCamelCase ) __A = [ [ self._preprocess_image( image=_lowerCamelCase, do_resize=_lowerCamelCase, size=_lowerCamelCase, resample=_lowerCamelCase, do_center_crop=_lowerCamelCase, crop_size=_lowerCamelCase, do_rescale=_lowerCamelCase, rescale_factor=_lowerCamelCase, do_normalize=_lowerCamelCase, image_mean=_lowerCamelCase, image_std=_lowerCamelCase, data_format=_lowerCamelCase, ) for img in video ] for video in videos ] __A = {'''pixel_values''': videos} return BatchFeature(data=_lowerCamelCase, tensor_type=_lowerCamelCase )
266
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = current_set.copy() for row_index, row in enumerate(__UpperCamelCase ): __A = row[0] for column_index, column in enumerate(__UpperCamelCase ): if magnitude == 0: __A = column continue __A = column / magnitude # Subtract to cancel term __A = current_set[0] __A = [first_row] __A = current_set[1::] for row in current_set: __A = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__UpperCamelCase ) continue for column_index in range(len(__UpperCamelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__UpperCamelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: __A = final_set[0] __A = [] __A = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) __A = simplify(__UpperCamelCase ) for i in range(len(__UpperCamelCase ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , __UpperCamelCase ) __A = resultant return final_set def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if len(__UpperCamelCase ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) __A = len(__UpperCamelCase ) + 1 if any(len(__UpperCamelCase ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(__UpperCamelCase , (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(__UpperCamelCase ) == 1: return [equations[0][-1] / equations[0][0]] __A = equations.copy() if any(0 in row for row in data_set ): __A = data_set.copy() __A = [] for row_index, row in enumerate(__UpperCamelCase ): if 0 not in row: __A = data_set.pop(__UpperCamelCase ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0 , __UpperCamelCase ) __A = data_set.copy() __A = simplify(__UpperCamelCase ) __A = simplified[::-1] __A = [] for row in simplified: __A = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue __A = row.copy()[: len(__UpperCamelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__UpperCamelCase ) == 0: solutions.append(0 ) continue __A = temp_row[1::] __A = temp_row[::-1] for column_index, column in enumerate(__UpperCamelCase ): current_solution -= column * solutions[column_index] solutions.append(__UpperCamelCase ) __A = [] for item in solutions: final.append(float(round(__UpperCamelCase , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowercase_ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
266
1
"""simple docstring""" import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Union[str, Any] = OpenAIGPTTokenizer A_ : Optional[Any] = OpenAIGPTTokenizerFast A_ : Optional[int] = True A_ : Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __A = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] __A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) ) __A = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', ''''''] __A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file, '''w''' ) as fp: fp.write(json.dumps(_lowerCamelCase ) ) with open(self.merges_file, '''w''' ) as fp: fp.write('''\n'''.join(_lowerCamelCase ) ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ): '''simple docstring''' return "lower newer", "lower newer" def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = OpenAIGPTTokenizer(self.vocab_file, self.merges_file ) __A = '''lower''' __A = ['''low''', '''er</w>'''] __A = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) __A = tokens + ['''<unk>'''] __A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : List[str]=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __A = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase, **_lowerCamelCase ) # Simple input __A = '''This is a simple input''' __A = ['''This is a simple input 1''', '''This is a simple input 2'''] __A = ('''This is a simple input''', '''This is a pair''') __A = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_lowerCamelCase, tokenizer_r.encode, _lowerCamelCase, max_length=_lowerCamelCase, padding='''max_length''' ) # Simple input self.assertRaises(_lowerCamelCase, tokenizer_r.encode_plus, _lowerCamelCase, max_length=_lowerCamelCase, padding='''max_length''' ) # Simple input self.assertRaises( _lowerCamelCase, tokenizer_r.batch_encode_plus, _lowerCamelCase, max_length=_lowerCamelCase, padding='''max_length''', ) # Pair input self.assertRaises(_lowerCamelCase, tokenizer_r.encode, _lowerCamelCase, max_length=_lowerCamelCase, padding='''max_length''' ) # Pair input self.assertRaises(_lowerCamelCase, tokenizer_r.encode_plus, _lowerCamelCase, max_length=_lowerCamelCase, padding='''max_length''' ) # Pair input self.assertRaises( _lowerCamelCase, tokenizer_r.batch_encode_plus, _lowerCamelCase, max_length=_lowerCamelCase, 
padding='''max_length''', ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' pass @require_ftfy @require_spacy @require_tokenizers class snake_case ( _lowerCAmelCase ): '''simple docstring''' pass
266
"""simple docstring""" from __future__ import annotations from typing import Any def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not postfix_notation: return 0 __A = {'''+''', '''-''', '''*''', '''/'''} __A = [] for token in postfix_notation: if token in operations: __A , __A = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(__UpperCamelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
266
1
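# A short usage sketch for the postfix evaluator above; the function keeps its obfuscated
# name `lowerCAmelCase`, and the token lists below are assumed example inputs.
# "2 3 1 * + 9 -"  ->  2 + (3 * 1) - 9
tokens = ["2", "3", "1", "*", "+", "9", "-"]
print(lowerCAmelCase(tokens))             # -4

# division is truncated towards zero, so -7 / 2 evaluates to -3 rather than -4
print(lowerCAmelCase(["-7", "2", "/"]))   # -3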
"""simple docstring""" import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": lowercase_ = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: '))) print('Googling.....') lowercase_ = F'''https://www.google.com/search?q={query}&num=100''' lowercase_ = requests.get( url, headers={'User-Agent': str(UserAgent().random)}, ) try: lowercase_ = ( BeautifulSoup(res.text, 'html.parser') .find('div', attrs={'class': 'yuRUbf'}) .find('a') .get('href') ) except AttributeError: lowercase_ = parse_qs( BeautifulSoup(res.text, 'html.parser') .find('div', attrs={'class': 'kCrYT'}) .find('a') .get('href') )['url'][0] webbrowser.open(link)
266
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple, _lowerCamelCase : List[str]=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : int=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : int=99, _lowerCamelCase : Optional[int]=32, _lowerCamelCase : Tuple=5, _lowerCamelCase : Tuple=4, _lowerCamelCase : str=37, _lowerCamelCase : Union[str, Any]="gelu", _lowerCamelCase : int=0.1, _lowerCamelCase : List[Any]=0.1, _lowerCamelCase : Dict=5_12, _lowerCamelCase : List[Any]=16, _lowerCamelCase : Any=2, _lowerCamelCase : Any=0.02, _lowerCamelCase : Dict=4, ): '''simple docstring''' __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_attention_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_choices def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __A = None if self.use_attention_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) __A = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self.prepare_config_and_inputs() __A , __A , __A , __A = config_and_inputs __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Dict = True A_ : Tuple = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = FlaxRoFormerModelTester(self ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: 
__A = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''', from_pt=_lowerCamelCase ) __A = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCamelCase ) @require_flax class snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __A = jnp.array([[0, 1, 2, 3, 4, 5]] ) __A = model(_lowerCamelCase )[0] __A = 5_00_00 __A = (1, 6, vocab_size) self.assertEqual(output.shape, _lowerCamelCase ) __A = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
266
1
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() ) @pytest.fixture def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" class snake_case : '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Optional[Any] ): '''simple docstring''' __A = metric_id class snake_case : '''simple docstring''' A_ : Union[str, Any] = [MetricMock(_lowerCAmelCase ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]] def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' return self._metrics monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() ) @pytest.mark.parametrize( '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" if "tmp_path" in args: __A = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args ) with pytest.warns(__UpperCamelCase , match='''https://huggingface.co/docs/evaluate''' ): func(*__UpperCamelCase )
266
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def lowerCAmelCase ( __UpperCamelCase = 1_0_0_0_0_0_0 , __UpperCamelCase = 1_0 ): """simple docstring""" __A = defaultdict(__UpperCamelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __A = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __A = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__UpperCamelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 1_0 ) if __name__ == "__main__": print(F'''{solution() = }''')
266
1
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def lowerCAmelCase ( __UpperCamelCase="ro" , __UpperCamelCase="en" , __UpperCamelCase="wmt16" , __UpperCamelCase=None ): """simple docstring""" try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError('''run pip install datasets''' ) __A = f'{src_lang}-{tgt_lang}' print(f'Converting {dataset}-{pair}' ) __A = datasets.load_dataset(__UpperCamelCase , __UpperCamelCase ) if save_dir is None: __A = f'{dataset}-{pair}' __A = Path(__UpperCamelCase ) save_dir.mkdir(exist_ok=__UpperCamelCase ) for split in ds.keys(): print(f'Splitting {split} with {ds[split].num_rows} records' ) # to save to val.source, val.target like summary datasets __A = '''val''' if split == '''validation''' else split __A = save_dir.joinpath(f'{fn}.source' ) __A = save_dir.joinpath(f'{fn}.target' ) __A = src_path.open('''w+''' ) __A = tgt_path.open('''w+''' ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): __A = x['''translation'''] src_fp.write(ex[src_lang] + '''\n''' ) tgt_fp.write(ex[tgt_lang] + '''\n''' ) print(f'Saved {dataset} dataset to {save_dir}' ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
266
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : Optional[int]=2, _lowerCamelCase : Optional[int]=3, _lowerCamelCase : int=64, _lowerCamelCase : List[str]=None ): '''simple docstring''' __A = np.random.default_rng(_lowerCamelCase ) __A = length __A = rng.normal(size=(length,) ).astype(np.floataa ) __A = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa ) def __len__( self : str ): '''simple docstring''' return self.length def __getitem__( self : Dict, _lowerCamelCase : Optional[int] ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple=0, _lowerCamelCase : Any=0, _lowerCamelCase : Optional[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[Any]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a[0] + self.b[0] class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : str, _lowerCamelCase : Optional[Any]=0, _lowerCamelCase : Any=0, _lowerCamelCase : List[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a + self.b def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1_6 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __A = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __A = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} __A = load_dataset('''csv''' , data_files=__UpperCamelCase ) __A = datasets['''train'''].unique('''label''' ) __A = {v: i for i, v in enumerate(__UpperCamelCase )} def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) if "label" in examples: __A = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __A = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
__A = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=2 ) __A = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
266
1
"""simple docstring""" from __future__ import annotations from scipy.special import comb # type: ignore class snake_case : '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : list[tuple[float, float]] ): '''simple docstring''' __A = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. __A = len(_lowerCamelCase ) - 1 def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : float ): '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." __A = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree, _lowerCamelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(_lowerCamelCase ), 5 ) == 1 return output_values def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : float ): '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." __A = self.basis_function(_lowerCamelCase ) __A = 0.0 __A = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : float = 0.01 ): '''simple docstring''' from matplotlib import pyplot as plt # type: ignore __A = [] # x coordinates of points to plot __A = [] # y coordinates of points to plot __A = 0.0 while t <= 1: __A = self.bezier_curve_function(_lowerCamelCase ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size __A = [i[0] for i in self.list_of_points] __A = [i[1] for i in self.list_of_points] plt.plot( _lowerCamelCase, _lowerCamelCase, color='''blue''', label='''Curve of Degree ''' + str(self.degree ), ) plt.scatter(_lowerCamelCase, _lowerCamelCase, color='''red''', label='''Control Points''' ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
266
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ), } ), ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[List[List[str]]], _lowerCamelCase : List[List[str]], _lowerCamelCase : int = 1, _lowerCamelCase : int = 4, ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCamelCase, hypotheses=_lowerCamelCase, min_len=_lowerCamelCase, max_len=_lowerCamelCase ) }
266
1
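# A minimal standalone sketch of the GLEU computation described in the metric
# docstring above: collect all 1..4-gram counts from the reference and the
# hypothesis, clip the matches, and take min(n-gram recall, n-gram precision).
# Function and variable names here are illustrative, not part of the
# datasets/nltk APIs.
from collections import Counter


def ngram_counts(tokens, min_len=1, max_len=4):
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts


def sentence_gleu_sketch(reference, hypothesis, min_len=1, max_len=4):
    ref_counts = ngram_counts(reference, min_len, max_len)
    hyp_counts = ngram_counts(hypothesis, min_len, max_len)
    matches = sum((ref_counts & hyp_counts).values())  # clipped matching n-grams
    recall = matches / max(sum(ref_counts.values()), 1)
    precision = matches / max(sum(hyp_counts.values()), 1)
    return min(recall, precision)


if __name__ == "__main__":
    ref = "he was interested in world history because he read the book".split()
    hyp = "he read the book because he was interested in world history".split()
    print(round(sentence_gleu_sketch(ref, hyp), 2))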
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=_lowerCAmelCase ) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} ) A_ : ClassVar[Features] = Features({"text": Value("string" )} ) A_ : ClassVar[Features] = Features({} ) A_ : str = "text" @property def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return {self.text_column: "text"}
266
"""simple docstring""" class snake_case : '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : list[int] ): '''simple docstring''' __A = len(_lowerCamelCase ) __A = [0] * len_array if len_array > 0: __A = array[0] for i in range(1, _lowerCamelCase ): __A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int ): '''simple docstring''' __A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(_lowerCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
266
1
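# A short standalone sketch of the prefix-sum technique used by the class in
# the sample above (written separately because the class and method names in
# the record are placeholder-obfuscated): prefix[i] stores array[0] + ... +
# array[i], so any inclusive range sum becomes an O(1) lookup.
def build_prefix_sums(array):
    prefix, running = [], 0
    for value in array:
        running += value
        prefix.append(running)
    return prefix


def range_sum(prefix, start, end):
    # sum of array[start..end], both ends inclusive
    return prefix[end] - (prefix[start - 1] if start > 0 else 0)


if __name__ == "__main__":
    prefix = build_prefix_sums([1, 2, 3, 4, 5])
    assert range_sum(prefix, 0, 4) == 15
    assert range_sum(prefix, 1, 3) == 9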
"""simple docstring""" import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int], *_lowerCamelCase : List[Any], **_lowerCamelCase : Optional[int] ): '''simple docstring''' warnings.warn( '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use FlavaImageProcessor instead.''', _lowerCamelCase, ) super().__init__(*_lowerCamelCase, **_lowerCamelCase )
266
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowercase_ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowercase_ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase ( ): """simple docstring""" __A = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __A = bs[:] __A = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCamelCase ) cs.append(2**8 + n ) n += 1 __A = [chr(__UpperCamelCase ) for n in cs] return dict(zip(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = set() __A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __A = char return pairs class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = VOCAB_FILES_NAMES A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : Dict="replace", _lowerCamelCase : Any="<s>", _lowerCamelCase : Optional[int]="</s>", _lowerCamelCase : Dict="</s>", _lowerCamelCase : List[Any]="<s>", _lowerCamelCase : List[str]="<unk>", _lowerCamelCase : str="<pad>", _lowerCamelCase : Any="<mask>", _lowerCamelCase : Any=False, **_lowerCamelCase : Tuple, ): '''simple docstring''' __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else bos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else eos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else sep_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else cls_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token super().__init__( errors=_lowerCamelCase, bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, add_prefix_space=_lowerCamelCase, **_lowerCamelCase, ) with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) __A = {v: k for k, v in self.encoder.items()} __A = errors # how to handle errors in decoding __A = bytes_to_unicode() __A = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCamelCase, encoding='''utf-8''' ) as merges_handle: __A = merges_handle.read().split('''\n''' )[1:-1] __A = [tuple(merge.split() ) for merge in bpe_merges] __A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) ) __A = {} __A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder, **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] __A = tuple(_lowerCamelCase ) __A = get_pairs(_lowerCamelCase ) if not pairs: return token while True: __A = min(_lowerCamelCase, key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __A , __A = bigram __A = [] __A = 0 while i < len(_lowerCamelCase ): try: __A = word.index(_lowerCamelCase, _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __A = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __A = tuple(_lowerCamelCase ) __A = new_word if len(_lowerCamelCase ) == 1: break else: __A = get_pairs(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = word return word def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Dict ): '''simple docstring''' __A = [] for token in re.findall(self.pat, _lowerCamelCase ): __A = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) ) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' return self.encoder.get(_lowerCamelCase, self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ): '''simple docstring''' return self.decoder.get(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = ''''''.join(_lowerCamelCase ) __A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors ) return 
text def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowerCamelCase, ensure_ascii=_lowerCamelCase ) + '''\n''' ) __A = 0 with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ''' Please check that the tokenizer is not corrupted!''' ) __A = token_index writer.write(''' '''.join(_lowerCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str]=False, **_lowerCamelCase : List[Any] ): '''simple docstring''' __A = kwargs.pop('''add_prefix_space''', self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()): __A = ''' ''' + text return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ): '''simple docstring''' __A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __A = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
266
1
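# A small sketch of a single byte-pair-encoding (BPE) pass, mirroring the
# merge loop inside the Blenderbot tokenizer's bpe() method above: repeatedly
# merge the adjacent symbol pair with the lowest merge rank until no ranked
# pair is left. The merge table below is made up purely for illustration.
def bpe_sketch(token, merge_ranks):
    word = list(token)
    while True:
        pairs = set(zip(word, word[1:]))
        ranked = [pair for pair in pairs if pair in merge_ranks]
        if not ranked:
            break
        first, second = min(ranked, key=merge_ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i + 1 < len(word) and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word


if __name__ == "__main__":
    ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}
    assert bpe_sketch("low", ranks) == ["low"]
    assert bpe_sketch("lower", ranks) == ["low", "er"]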
"""simple docstring""" import math import random from typing import Any from .hill_climbing import SearchProblem def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = math.inf , __UpperCamelCase = -math.inf , __UpperCamelCase = math.inf , __UpperCamelCase = -math.inf , __UpperCamelCase = False , __UpperCamelCase = 1_0_0 , __UpperCamelCase = 0.01 , __UpperCamelCase = 1 , ): """simple docstring""" __A = False __A = search_prob __A = start_temperate __A = [] __A = 0 __A = None while not search_end: __A = current_state.score() if best_state is None or current_score > best_state.score(): __A = current_state scores.append(__UpperCamelCase ) iterations += 1 __A = None __A = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to __A = random.randint(0 , len(__UpperCamelCase ) - 1 ) # picking a random neighbor __A = neighbors.pop(__UpperCamelCase ) __A = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: __A = change * -1 # in case we are finding minimum if change > 0: # improves the solution __A = picked_neighbor else: __A = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability __A = picked_neighbor __A = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor __A = True else: __A = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(__UpperCamelCase ) , __UpperCamelCase ) plt.xlabel('''Iterations''' ) plt.ylabel('''Function values''' ) plt.show() return best_state if __name__ == "__main__": def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) lowercase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) lowercase_ = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( 'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ' F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) # starting the problem with initial coordinates (12, 47) lowercase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) lowercase_ = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( 'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ' F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return (3 * x**2) - (6 * y) lowercase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) lowercase_ = simulated_annealing(prob, find_max=False, visualization=True) print( 'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ' F'''{local_min.score()}''' ) lowercase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) lowercase_ = simulated_annealing(prob, find_max=True, visualization=True) print( 'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ' F'''{local_min.score()}''' )
266
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging lowercase_ = ( 'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py' ) lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCAmelCase ( ): """simple docstring""" __A = '''https://pypi.org/pypi/diffusers/json''' __A = json.loads(request.urlopen(__UpperCamelCase ).read() )['''releases'''].keys() return sorted(__UpperCamelCase , key=lambda __UpperCamelCase : version.Version(__UpperCamelCase ) ) def lowerCAmelCase ( ): """simple docstring""" if HF_MODULES_CACHE in sys.path: return sys.path.append(__UpperCamelCase ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = Path(__UpperCamelCase ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" init_hf_modules() __A = Path(__UpperCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import .xxx` __A = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = False __A = [module_file] __A = [] # Let's recurse through all relative imports while not no_change: __A = [] for f in files_to_check: new_imports.extend(get_relative_imports(__UpperCamelCase ) ) __A = Path(__UpperCamelCase ).parent __A = [str(module_path / m ) for m in new_imports] __A = [f for f in new_import_files if f not in all_relative_imports] __A = [f'{f}.py' for f in new_import_files] __A = len(__UpperCamelCase ) == 0 all_relative_imports.extend(__UpperCamelCase ) return all_relative_imports def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import xxx` __A = re.findall('''^\s*import\s+(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Only keep the top-level module __A = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all __A = list(set(__UpperCamelCase ) ) __A = [] for imp in imports: try: importlib.import_module(__UpperCamelCase ) except ImportError: missing_packages.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' f'{", 
".join(__UpperCamelCase )}. Run `pip install {" ".join(__UpperCamelCase )}`' ) return get_relative_imports(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = module_path.replace(os.path.sep , '''.''' ) __A = importlib.import_module(__UpperCamelCase ) if class_name is None: return find_pipeline_class(__UpperCamelCase ) return getattr(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" from ..pipelines import DiffusionPipeline __A = dict(inspect.getmembers(__UpperCamelCase , inspect.isclass ) ) __A = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , __UpperCamelCase ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' f' {loaded_module}.' ) __A = cls return pipeline_class def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ): """simple docstring""" __A = str(__UpperCamelCase ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): __A = module_file_or_url __A = '''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: __A = get_diffusers_versions() # cut ".dev0" __A = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: __A = latest_version if latest_version[1:] in available_versions else '''main''' logger.info(f'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: __A = f'v{revision}' elif revision == "main": __A = revision else: raise ValueError( f'`custom_revision`: {revision} does not exist. Please make sure to choose one of' f' {", ".join(available_versions + ["main"] )}.' ) # community pipeline on GitHub __A = COMMUNITY_PIPELINES_URL.format(revision=__UpperCamelCase , pipeline=__UpperCamelCase ) try: __A = cached_download( __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = '''git''' __A = pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise else: try: # Load from URL or cache if already cached __A = hf_hub_download( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment __A = check_imports(__UpperCamelCase ) # Now we move the module inside our cached dynamic modules. 
__A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(__UpperCamelCase ) __A = Path(__UpperCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(__UpperCamelCase , submodule_path / module_file ) for module_needed in modules_needed: __A = f'{module_needed}.py' shutil.copy(os.path.join(__UpperCamelCase , __UpperCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(__UpperCamelCase , __UpperCamelCase ): __A = use_auth_token elif use_auth_token is True: __A = HfFolder.get_token() else: __A = None __A = model_info(__UpperCamelCase , revision=__UpperCamelCase , token=__UpperCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. __A = submodule_path / commit_hash __A = full_submodule + os.path.sep + commit_hash create_dynamic_module(__UpperCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(__UpperCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( __UpperCamelCase , f'{module_needed}.py' , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return os.path.join(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , **__UpperCamelCase , ): """simple docstring""" __A = get_cached_module_file( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return get_class_in_module(__UpperCamelCase , final_module.replace('''.py''' , '''''' ) )
266
1
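# A tiny sketch of the acceptance rule used by the simulated-annealing sample
# above: a move that worsens the score (change < 0 when maximising) is still
# accepted with probability exp(change / temperature), so exploration is easy
# while the system is hot and becomes nearly impossible once it has cooled.
import math
import random


def accept_move(change, temperature):
    if change > 0:  # strictly better: always accept
        return True
    return random.random() < math.exp(change / temperature)


if __name__ == "__main__":
    for temperature in (100.0, 1.0, 0.01):
        probability = math.exp(-1.0 / temperature)
        print(f"T={temperature:>6}: accept a -1.0 move with p={probability:.3g}")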
"""simple docstring""" import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowercase_ = 'hf-internal-testing/tiny-random-bert' lowercase_ = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert') lowercase_ = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6' class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = cached_file(_lowerCamelCase, _lowerCamelCase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_lowerCamelCase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_lowerCamelCase, _lowerCamelCase ) ) ) with open(os.path.join(_lowerCamelCase, '''refs''', '''main''' ) ) as f: __A = f.read() self.assertEqual(_lowerCamelCase, os.path.join(_lowerCamelCase, '''snapshots''', _lowerCamelCase, _lowerCamelCase ) ) self.assertTrue(os.path.isfile(_lowerCamelCase ) ) # File is cached at the same place the second time. __A = cached_file(_lowerCamelCase, _lowerCamelCase ) self.assertEqual(_lowerCamelCase, _lowerCamelCase ) # Using a specific revision to test the full commit hash. __A = cached_file(_lowerCamelCase, _lowerCamelCase, revision='''9b8c223''' ) self.assertEqual(_lowerCamelCase, os.path.join(_lowerCamelCase, '''snapshots''', _lowerCamelCase, _lowerCamelCase ) ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' with self.assertRaisesRegex(_lowerCamelCase, '''is not a valid model identifier''' ): __A = cached_file('''tiny-random-bert''', _lowerCamelCase ) with self.assertRaisesRegex(_lowerCamelCase, '''is not a valid git identifier''' ): __A = cached_file(_lowerCamelCase, _lowerCamelCase, revision='''aaaa''' ) with self.assertRaisesRegex(_lowerCamelCase, '''does not appear to have a file named''' ): __A = cached_file(_lowerCamelCase, '''conf''' ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' with self.assertRaisesRegex(_lowerCamelCase, '''does not appear to have a file named''' ): __A = cached_file(_lowerCamelCase, '''conf''' ) with open(os.path.join(_lowerCamelCase, '''refs''', '''main''' ) ) as f: __A = f.read() self.assertTrue(os.path.isfile(os.path.join(_lowerCamelCase, '''.no_exist''', _lowerCamelCase, '''conf''' ) ) ) __A = cached_file(_lowerCamelCase, '''conf''', _raise_exceptions_for_missing_entries=_lowerCamelCase ) self.assertIsNone(_lowerCamelCase ) __A = cached_file(_lowerCamelCase, '''conf''', local_files_only=_lowerCamelCase, _raise_exceptions_for_missing_entries=_lowerCamelCase ) self.assertIsNone(_lowerCamelCase ) __A = mock.Mock() __A = 5_00 __A = {} __A = HTTPError __A = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''', return_value=_lowerCamelCase ) as mock_head: __A = cached_file(_lowerCamelCase, '''conf''', _raise_exceptions_for_connection_errors=_lowerCamelCase ) self.assertIsNone(_lowerCamelCase ) # This check we did call the fake head request mock_head.assert_called() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''', _lowerCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''', _lowerCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''', _lowerCamelCase ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''', '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_lowerCamelCase, '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''', _lowerCamelCase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_lowerCamelCase, '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''', _lowerCamelCase, revision='''ahaha''' ) __A = get_file_from_repo('''bert-base-cased''', _lowerCamelCase ) # The name is the cached name which is not very easy to test, so instead we load the content. __A = json.loads(open(_lowerCamelCase, '''r''' ).read() ) self.assertEqual(config['''hidden_size'''], 7_68 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __A = Path(_lowerCamelCase ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(_lowerCamelCase, '''a.txt''' ), str(_lowerCamelCase ) ) self.assertIsNone(get_file_from_repo(_lowerCamelCase, '''b.txt''' ) )
266
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ): __A = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sgugger/tiny-distilbert-classification''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) # set architectures equal to `None` __A = None __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], 
training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, save_to_csv=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ), 
inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ), env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ), multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ): self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ), log_print=_lowerCamelCase, trace_memory_line_by_line=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
266
1
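# A brief sketch of the hub cache layout exercised by the cached_file test
# above: the per-repo cache directory contains blobs/, refs/ and snapshots/,
# and refs/main holds the commit hash whose snapshots/<hash>/ folder contains
# the actual files. The directory argument below is illustrative.
import os


def resolve_cached_file(repo_cache_dir, filename):
    with open(os.path.join(repo_cache_dir, "refs", "main")) as ref:
        commit_hash = ref.read().strip()
    return os.path.join(repo_cache_dir, "snapshots", commit_hash, filename)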
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = PegasusTokenizer A_ : int = PegasusTokenizerFast A_ : Optional[Any] = True A_ : Union[str, Any] = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def _SCREAMING_SNAKE_CASE ( self : int, **_lowerCamelCase : List[Any] ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = '''</s>''' __A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ), _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<pad>''' ) self.assertEqual(vocab_keys[1], '''</s>''' ) self.assertEqual(vocab_keys[-1], '''v''' ) self.assertEqual(len(_lowerCamelCase ), 11_03 ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 11_03 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word __A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' __A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_61_03 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_03 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05 assert tokenizer.unk_token == 
"<unk>" assert tokenizer.model_max_length == 10_24 __A = '''To ensure a smooth flow of bank resolutions.''' __A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 1_50, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 10_24) assert batch.attention_mask.shape == (2, 10_24) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # fmt: off __A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase, model_name='''google/bigbird-pegasus-large-arxiv''', revision='''ba85d0851d708441f91440d509690f1ab6353415''', ) @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , 
unittest.TestCase ): '''simple docstring''' A_ : str = PegasusTokenizer A_ : Union[str, Any] = PegasusTokenizerFast A_ : Any = True A_ : str = True def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase, offset=0, mask_token_sent=_lowerCamelCase, mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], **_lowerCamelCase : Dict ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 10_00, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 40_96) assert batch.attention_mask.shape == (2, 40_96) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) __A = self._large_tokenizer(_lowerCamelCase ).input_ids self.assertListEqual( _lowerCamelCase, [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1], )
266
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = PegasusTokenizer A_ : int = PegasusTokenizerFast A_ : Optional[Any] = True A_ : Union[str, Any] = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def _SCREAMING_SNAKE_CASE ( self : int, **_lowerCamelCase : List[Any] ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = '''</s>''' __A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ), _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<pad>''' ) self.assertEqual(vocab_keys[1], '''</s>''' ) self.assertEqual(vocab_keys[-1], '''v''' ) self.assertEqual(len(_lowerCamelCase ), 11_03 ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 11_03 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word __A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' __A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_61_03 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_03 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05 assert tokenizer.unk_token == 
"<unk>" assert tokenizer.model_max_length == 10_24 __A = '''To ensure a smooth flow of bank resolutions.''' __A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 1_50, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 10_24) assert batch.attention_mask.shape == (2, 10_24) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # fmt: off __A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase, model_name='''google/bigbird-pegasus-large-arxiv''', revision='''ba85d0851d708441f91440d509690f1ab6353415''', ) @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , 
unittest.TestCase ): '''simple docstring''' A_ : str = PegasusTokenizer A_ : Union[str, Any] = PegasusTokenizerFast A_ : Any = True A_ : str = True def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase, offset=0, mask_token_sent=_lowerCamelCase, mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], **_lowerCamelCase : Dict ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 10_00, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 40_96) assert batch.attention_mask.shape == (2, 40_96) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) __A = self._large_tokenizer(_lowerCamelCase ).input_ids self.assertListEqual( _lowerCamelCase, [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1], )
266
1
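The tokenizer tests above rely on Pegasus reserving the first ids for special tokens and shifting every raw SentencePiece id by a fixed offset (103 in the large checkpoint, so unk ends up at offset + 2 == 105). A minimal sketch of that id layout; the helper names and the reserved-slot naming are illustrative, not the actual tokenizer internals:

# Sketch of the Pegasus id layout assumed by the asserts above: ids 0/1 are
# <pad>/</s>, ids 2/3 are the sentence- and word-level mask tokens, the rest
# of the first `OFFSET + 2` ids are reserved, and every raw SentencePiece id
# is shifted up by `OFFSET`. Names here are hypothetical.
OFFSET = 103
SPECIAL = {0: "<pad>", 1: "</s>", 2: "<mask_1>", 3: "<mask_2>"}

def sp_id_to_pegasus_id(sp_id: int) -> int:
    """Shift a raw SentencePiece id into the Pegasus id space."""
    return sp_id + OFFSET

def pegasus_id_to_token(pegasus_id: int, sp_vocab: dict) -> str:
    """Resolve an id to a reserved token or back to its SentencePiece piece."""
    if pegasus_id in SPECIAL:
        return SPECIAL[pegasus_id]
    if pegasus_id < OFFSET + 2:
        return f"<unk_{pegasus_id}>"  # remaining reserved slots (illustrative)
    return sp_vocab.get(pegasus_id - OFFSET, "<unk>")

assert sp_id_to_pegasus_id(2) == 105  # matches unk_token_id == offset + 2 above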
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print('Program to check whether a number is a Perfect number or not...') lowercase_ = int(input('Enter number: ').strip()) print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
266
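A quick usage sketch of the same divisor-sum check, run against the first few known perfect numbers:

def is_perfect(number: int) -> bool:
    # A number is perfect when it equals the sum of its proper divisors.
    return number > 1 and sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number

# The first four perfect numbers should pass; their neighbours should not.
assert all(is_perfect(n) for n in (6, 28, 496, 8128))
assert not any(is_perfect(n) for n in (5, 27, 100))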
"""simple docstring""" import re def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )] def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = split_input(str_ ) return "".join( [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" try: __A = split_input(__UpperCamelCase ) if upper: __A = ''''''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __A = ''''''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return to_simple_case(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" try: __A = to_simple_case(__UpperCamelCase ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''_''' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''-''' ) if __name__ == "__main__": __import__('doctest').testmod()
266
1
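A compact, self-contained sketch of the same snake_case / kebab-case conversion idea, written independently of the helper names above:

import re

def to_snake_case(text: str) -> str:
    # Split on anything that is not alphanumeric or whitespace, then on spaces,
    # and join the lowercased words with underscores.
    words = []
    for chunk in re.split(r"[^ a-zA-Z0-9\s]", text):
        words.extend(chunk.split())
    return "_".join(word.lower() for word in words)

def to_kebab_case(text: str) -> str:
    return to_snake_case(text).replace("_", "-")

assert to_snake_case("Hello, World example") == "hello_world_example"
assert to_kebab_case("Hello, World example") == "hello-world-example"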
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
266
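The import block above follows the usual optional-dependency guard; a generic sketch of that pattern with a hypothetical placeholder object standing in for the library's dummy-object modules:

# Generic sketch of the optional-dependency pattern used above: try the real
# imports, otherwise expose a placeholder that fails loudly when used.
class _MissingDependencyPlaceholder:
    def __init__(self, name: str, requirement: str):
        self._name, self._requirement = name, requirement

    def __call__(self, *args, **kwargs):
        raise ImportError(f"{self._name} requires `{self._requirement}` to be installed.")

try:
    import torch  # noqa: F401
    import transformers  # noqa: F401
    _deps_available = True
except ImportError:
    _deps_available = False

if not _deps_available:
    # Stand-in so calling SpectrogramDiffusionPipeline(...) raises a clear error.
    SpectrogramDiffusionPipeline = _MissingDependencyPlaceholder(
        "SpectrogramDiffusionPipeline", "torch and transformers"
    )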
"""simple docstring""" from __future__ import annotations class snake_case : '''simple docstring''' def __init__( self : int, _lowerCamelCase : List[Any]=None ): '''simple docstring''' __A = data __A = None def __repr__( self : Union[str, Any] ): '''simple docstring''' __A = [] __A = self while temp: string_rep.append(f'{temp.data}' ) __A = temp.next return "->".join(_lowerCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not elements_list: raise Exception('''The Elements List is empty''' ) __A = __A = Node(elements_list[0] ) for i in range(1 , len(__UpperCamelCase ) ): __A = Node(elements_list[i] ) __A = current.next return head def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def lowerCAmelCase ( ): """simple docstring""" from doctest import testmod testmod() __A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print('''Linked List:''' ) print(__UpperCamelCase ) print('''Elements in Reverse:''' ) print_reverse(__UpperCamelCase ) if __name__ == "__main__": main()
266
1
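A short usage sketch of the recursive reverse-print idea on a small hand-built list:

class Node:
    def __init__(self, data, next_node=None):
        self.data = data
        self.next = next_node

def print_reverse(node) -> None:
    # Recurse to the tail first, then print on the way back out.
    if node is not None:
        print_reverse(node.next)
        print(node.data)

# Build 14 -> 52 -> 14 -> 12 -> 43 and print it back to front.
head = None
for value in reversed([14, 52, 14, 12, 43]):
    head = Node(value, head)
print_reverse(head)  # prints 43, 12, 14, 52, 14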
"""simple docstring""" # Algorithm for the pigeonhole sorting def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = min(__UpperCamelCase ) # min() finds the minimum value __A = max(__UpperCamelCase ) # max() finds the maximum value __A = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size __A = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. __A = 0 for count in range(__UpperCamelCase ): while holes[count] > 0: holes[count] -= 1 __A = count + min_val i += 1 def lowerCAmelCase ( ): """simple docstring""" __A = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(__UpperCamelCase ) print('''Sorted order is:''' , ''' '''.join(__UpperCamelCase ) ) if __name__ == "__main__": main()
266
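A working standalone version of the same pigeonhole sort, with the printed output converted to strings (joining the raw integers, as the snippet above does, would raise a TypeError):

def pigeonhole_sort(values: list[int]) -> None:
    # In-place pigeonhole sort: one hole per possible value in [min, max].
    if not values:
        return
    min_val, max_val = min(values), max(values)
    holes = [0] * (max_val - min_val + 1)
    for x in values:
        holes[x - min_val] += 1
    i = 0
    for slot, occurrences in enumerate(holes):
        for _ in range(occurrences):
            values[i] = slot + min_val
            i += 1

data = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(data)
print("Sorted order is:", " ".join(str(x) for x in data))  # 2 3 4 6 7 8 8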
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : int = ["input_features", "attention_mask"] def __init__( self : Optional[Any], _lowerCamelCase : Union[str, Any]=80, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Any=80, _lowerCamelCase : List[str]=0.0, _lowerCamelCase : int=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]=True, **_lowerCamelCase : List[str], ): '''simple docstring''' super().__init__(feature_size=_lowerCamelCase, sampling_rate=_lowerCamelCase, padding_value=_lowerCamelCase, **_lowerCamelCase ) __A = num_mel_bins __A = do_ceptral_normalize __A = normalize_means __A = normalize_vars __A = True def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : np.ndarray, ): '''simple docstring''' __A = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __A = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 ) __A = ta_kaldi.fbank(_lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray, _lowerCamelCase : int, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : float = 0.0, ): '''simple docstring''' # make sure we normalize float32 arrays if normalize_means: __A = x[:input_length].mean(axis=0 ) __A = np.subtract(_lowerCamelCase, _lowerCamelCase ) if normalize_vars: __A = x[:input_length].std(axis=0 ) __A = np.divide(_lowerCamelCase, _lowerCamelCase ) if input_length < x.shape[0]: __A = padding_value # make sure array is in float32 __A = x.astype(np.floataa ) return x def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[np.ndarray], _lowerCamelCase : Optional[np.ndarray] = None ): '''simple docstring''' __A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_lowerCamelCase, _lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value ) for x, n in zip(_lowerCamelCase, _lowerCamelCase ) ] def __call__( self : Optional[Any], _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _lowerCamelCase : Union[bool, str, PaddingStrategy] = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : bool = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[bool] = None, **_lowerCamelCase : Optional[Any], ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __A = isinstance(_lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __A = is_batched_numpy or ( isinstance(_lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCamelCase, np.ndarray ): __A = np.asarray(_lowerCamelCase, dtype=np.floataa ) elif isinstance(_lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __A = [raw_speech] # extract fbank features __A = [self._extract_fbank_features(_lowerCamelCase ) for waveform in raw_speech] # convert into correct format for padding __A = BatchFeature({'''input_features''': features} ) __A = self.pad( _lowerCamelCase, padding=_lowerCamelCase, max_length=_lowerCamelCase, truncation=_lowerCamelCase, pad_to_multiple_of=_lowerCamelCase, return_attention_mask=_lowerCamelCase, **_lowerCamelCase, ) # make sure list is in array format __A = padded_inputs.get('''input_features''' ) if isinstance(input_features[0], _lowerCamelCase ): __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for feature in input_features] __A = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: __A = [np.asarray(_lowerCamelCase, dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __A = ( np.array(_lowerCamelCase, dtype=np.intaa ) if self._get_padding_strategies(_lowerCamelCase, max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __A = self.normalize( padded_inputs['''input_features'''], attention_mask=_lowerCamelCase ) if return_tensors is not None: __A = padded_inputs.convert_to_tensors(_lowerCamelCase ) return padded_inputs
266
1
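A minimal NumPy-only sketch of the utterance-level cepstral mean and variance normalization (CMVN) step that the extractor above applies, independent of the Kaldi fbank front end; the epsilon guard is my addition, not part of the original:

import numpy as np

def utterance_cmvn(features: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    # Normalize each feature dimension over the non-padded frames of one utterance.
    x = features.astype(np.float32).copy()
    mean = x[:input_length].mean(axis=0)
    x = x - mean
    std = x[:input_length].std(axis=0)
    x = x / (std + 1e-10)  # small epsilon to avoid division by zero (not in the original)
    x[input_length:] = padding_value  # keep padded frames at the padding value
    return x

feats = np.random.randn(20, 80).astype(np.float32)  # 20 frames, 80 mel bins
normalized = utterance_cmvn(feats, input_length=16)
print(normalized[:16].mean(axis=0).round(3))  # ~0 for every mel bin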
"""simple docstring""" from collections import Counter from timeit import timeit def lowerCAmelCase ( __UpperCamelCase = "" , ): """simple docstring""" return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2 def lowerCAmelCase ( __UpperCamelCase = "" ): """simple docstring""" if len(__UpperCamelCase ) == 0: return True __A = input_str.replace(''' ''' , '''''' ).lower() # character_freq_dict: Stores the frequency of every character in the input string __A = {} for character in lower_case_input_str: __A = character_freq_dict.get(__UpperCamelCase , 0 ) + 1 __A = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def lowerCAmelCase ( __UpperCamelCase = "" ): """simple docstring""" print('''\nFor string = ''' , __UpperCamelCase , ''':''' ) print( '''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(__UpperCamelCase ) , '''\ttime =''' , timeit( '''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , ) print( '''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(__UpperCamelCase ) , '''\ttime =''' , timeit( '''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , ) if __name__ == "__main__": lowercase_ = input( 'Enter string to determine if it can be rearranged as a palindrome or not: ' ).strip() benchmark(check_str) lowercase_ = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
266
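A compact usage sketch of the Counter-based check above:

from collections import Counter

def can_rearrange_as_palindrome(text: str) -> bool:
    # At most one character may have an odd count once spaces are removed.
    counts = Counter(text.replace(" ", "").lower())
    return sum(count % 2 for count in counts.values()) < 2

assert can_rearrange_as_palindrome("Momo")
assert can_rearrange_as_palindrome("never odd or even")
assert not can_rearrange_as_palindrome("python")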
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=13, _lowerCamelCase : Any=3, _lowerCamelCase : Optional[int]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Any=[0.5, 0.5, 0.5], _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = do_normalize __A = image_mean __A = image_std def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = ViTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = EfficientFormerImageProcessorTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''image_std''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_proc_tester, 
equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), )
266
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json' # See all FNet models at https://huggingface.co/models?filter=fnet } class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Dict = "fnet" def __init__( self : str, _lowerCamelCase : Any=3_20_00, _lowerCamelCase : int=7_68, _lowerCamelCase : List[str]=12, _lowerCamelCase : Union[str, Any]=30_72, _lowerCamelCase : Union[str, Any]="gelu_new", _lowerCamelCase : Optional[int]=0.1, _lowerCamelCase : int=5_12, _lowerCamelCase : Dict=4, _lowerCamelCase : List[Any]=0.02, _lowerCamelCase : Any=1e-12, _lowerCamelCase : Dict=False, _lowerCamelCase : Union[str, Any]=5_12, _lowerCamelCase : Optional[Any]=3, _lowerCamelCase : str=1, _lowerCamelCase : Dict=2, **_lowerCamelCase : Any, ): '''simple docstring''' super().__init__(pad_token_id=_lowerCamelCase, bos_token_id=_lowerCamelCase, eos_token_id=_lowerCamelCase, **_lowerCamelCase ) __A = vocab_size __A = max_position_embeddings __A = hidden_size __A = num_hidden_layers __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = initializer_range __A = type_vocab_size __A = layer_norm_eps __A = use_tpu_fourier_optimizations __A = tpu_short_seq_length
266
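A short usage sketch of a config class like the one above, assuming the standard PretrainedConfig interface from transformers; the overridden values are illustrative:

from transformers import FNetConfig

# Instantiate with a couple of overridden hyper-parameters and round-trip it
# through a dict, which is how configs are normally saved and reloaded.
config = FNetConfig(hidden_size=512, num_hidden_layers=6)
config_dict = config.to_dict()
restored = FNetConfig.from_dict(config_dict)

assert restored.hidden_size == 512 and restored.num_hidden_layers == 6
print(restored.model_type)  # "fnet"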
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int], *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : Dict ): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''', _lowerCamelCase, ) super().__init__(*_lowerCamelCase, **_lowerCamelCase )
266
1
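The class above is a thin deprecation shim; a generic sketch of the same pattern for any renamed class (the class names here are placeholders):

import warnings

class NewImageProcessor:
    """The class users are meant to migrate to."""
    def __init__(self, size: int = 224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias that still works but warns on construction."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=512)  # emits a FutureWarning
assert extractor.size == 512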
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = tempfile.mkdtemp() __A = BlipImageProcessor() __A = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' ) __A = BlipaProcessor(_lowerCamelCase, _lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : Tuple, **_lowerCamelCase : List[Any] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname, **_lowerCamelCase ).tokenizer def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], **_lowerCamelCase : str ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname, **_lowerCamelCase ).image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = [np.random.randint(2_55, size=(3, 30, 4_00), dtype=np.uinta )] __A = [Image.fromarray(np.moveaxis(_lowerCamelCase, 0, -1 ) ) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __A = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' ) __A = self.get_image_processor(do_normalize=_lowerCamelCase, padding_value=1.0 ) __A = BlipaProcessor.from_pretrained( self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=_lowerCamelCase, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer, _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.get_image_processor() __A = self.get_tokenizer() __A = BlipaProcessor(tokenizer=_lowerCamelCase, image_processor=_lowerCamelCase ) __A = self.prepare_image_inputs() __A = image_processor(_lowerCamelCase, return_tensors='''np''' ) __A = processor(images=_lowerCamelCase, return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = self.get_image_processor() __A = self.get_tokenizer() __A = BlipaProcessor(tokenizer=_lowerCamelCase, image_processor=_lowerCamelCase ) __A = '''lower newer''' __A = processor(text=_lowerCamelCase ) __A = tokenizer(_lowerCamelCase, return_token_type_ids=_lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key] ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = self.get_image_processor() __A = self.get_tokenizer() __A = BlipaProcessor(tokenizer=_lowerCamelCase, image_processor=_lowerCamelCase ) __A = '''lower newer''' __A = self.prepare_image_inputs() __A = 
processor(text=_lowerCamelCase, images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''input_ids''', '''attention_mask'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.get_image_processor() __A = self.get_tokenizer() __A = BlipaProcessor(tokenizer=_lowerCamelCase, image_processor=_lowerCamelCase ) __A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __A = processor.batch_decode(_lowerCamelCase ) __A = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.get_image_processor() __A = self.get_tokenizer() __A = BlipaProcessor(tokenizer=_lowerCamelCase, image_processor=_lowerCamelCase ) __A = '''lower newer''' __A = self.prepare_image_inputs() __A = processor(text=_lowerCamelCase, images=_lowerCamelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
266
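The processor tests above check that text and image features end up in one batch with the keys pixel_values, input_ids and attention_mask; a toy sketch of that merging step, with plain dicts standing in for the real tokenizer, image processor and BatchFeature:

def combine_processor_outputs(text_features: dict, image_features: dict) -> dict:
    # A processor in this style simply merges the tokenizer output
    # (input_ids, attention_mask) with the image processor output (pixel_values).
    combined = dict(image_features)
    combined.update(text_features)
    return combined

text = {"input_ids": [[1, 4, 5]], "attention_mask": [[1, 1, 1]]}
image = {"pixel_values": [[[0.0] * 4] * 4]}
print(sorted(combine_processor_outputs(text, image).keys()))
# ['attention_mask', 'input_ids', 'pixel_values']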
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : int=3, _lowerCamelCase : Optional[Any]=18, _lowerCamelCase : Any=30, _lowerCamelCase : str=4_00, _lowerCamelCase : int=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=True, ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = apply_ocr def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = LayoutLMvaImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''apply_ocr''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} ) __A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) self.assertIsInstance(encoding.words, _lowerCamelCase ) self.assertIsInstance(encoding.boxes, _lowerCamelCase ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def 
_SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' # with apply_OCR = True __A = LayoutLMvaImageProcessor() from datasets import load_dataset __A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' ) __A = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ), len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', 
'''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 
4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, _lowerCamelCase ) self.assertListEqual(encoding.boxes, _lowerCamelCase ) # with apply_OCR = False __A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
266
1
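The word boxes in the OCR test above sit on the 0-1000 coordinate grid that LayoutLM-style models expect; a small standalone sketch of that normalization step (a common preprocessing convention, not code taken from the test file):

def normalize_box(box, image_width: int, image_height: int) -> list[int]:
    # Scale pixel coordinates (x0, y0, x1, y1) onto the 0-1000 grid used by
    # LayoutLM-style models so boxes are resolution independent.
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / image_width),
        int(1000 * y0 / image_height),
        int(1000 * x1 / image_width),
        int(1000 * y1 / image_height),
    ]

# A word box on a 762x1000 scanned page.
print(normalize_box((141, 57, 214, 69), image_width=762, image_height=1000))
# -> [185, 57, 280, 69]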
"""simple docstring""" from __future__ import annotations class snake_case : '''simple docstring''' def __init__( self : int, _lowerCamelCase : List[Any]=None ): '''simple docstring''' __A = data __A = None def __repr__( self : Union[str, Any] ): '''simple docstring''' __A = [] __A = self while temp: string_rep.append(f'{temp.data}' ) __A = temp.next return "->".join(_lowerCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not elements_list: raise Exception('''The Elements List is empty''' ) __A = __A = Node(elements_list[0] ) for i in range(1 , len(__UpperCamelCase ) ): __A = Node(elements_list[i] ) __A = current.next return head def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def lowerCAmelCase ( ): """simple docstring""" from doctest import testmod testmod() __A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print('''Linked List:''' ) print(__UpperCamelCase ) print('''Elements in Reverse:''' ) print_reverse(__UpperCamelCase ) if __name__ == "__main__": main()
266
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class snake_case ( ctypes.Structure ): '''simple docstring''' A_ : List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = False ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25l''' ) sys.stdout.flush() def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = True ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25h''' ) sys.stdout.flush() @contextmanager def lowerCAmelCase ( ): """simple docstring""" try: hide_cursor() yield finally: show_cursor()
266
1
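A short usage sketch of the same idea, driving a simple progress spinner with the cursor hidden (POSIX escape codes only, matching the codes used above):

import itertools
import sys
import time
from contextlib import contextmanager

@contextmanager
def hidden_cursor():
    # Hide the terminal cursor for the duration of the block, always restore it.
    sys.stdout.write("\033[?25l")
    sys.stdout.flush()
    try:
        yield
    finally:
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()

with hidden_cursor():
    for frame in itertools.islice(itertools.cycle("|/-\\"), 20):
        sys.stdout.write(f"\rworking {frame}")
        sys.stdout.flush()
        time.sleep(0.05)
print()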
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase_ = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
266
"""simple docstring""" import argparse import struct import unittest class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : bytes ): '''simple docstring''' __A = data # Initialize hash values __A = [ 0X6a_09e_667, 0Xbb_67a_e85, 0X3c_6ef_372, 0Xa5_4ff_53a, 0X51_0e5_27f, 0X9b_056_88c, 0X1f_83d_9ab, 0X5b_e0c_d19, ] # Initialize round constants __A = [ 0X42_8a2_f98, 0X71_374_491, 0Xb5_c0f_bcf, 0Xe9_b5d_ba5, 0X39_56c_25b, 0X59_f11_1f1, 0X92_3f8_2a4, 0Xab_1c5_ed5, 0Xd8_07a_a98, 0X12_835_b01, 0X24_318_5be, 0X55_0c7_dc3, 0X72_be5_d74, 0X80_deb_1fe, 0X9b_dc0_6a7, 0Xc1_9bf_174, 0Xe4_9b6_9c1, 0Xef_be4_786, 0X0f_c19_dc6, 0X24_0ca_1cc, 0X2d_e92_c6f, 0X4a_748_4aa, 0X5c_b0a_9dc, 0X76_f98_8da, 0X98_3e5_152, 0Xa8_31c_66d, 0Xb0_032_7c8, 0Xbf_597_fc7, 0Xc6_e00_bf3, 0Xd5_a79_147, 0X06_ca6_351, 0X14_292_967, 0X27_b70_a85, 0X2e_1b2_138, 0X4d_2c6_dfc, 0X53_380_d13, 0X65_0a7_354, 0X76_6a0_abb, 0X81_c2c_92e, 0X92_722_c85, 0Xa2_bfe_8a1, 0Xa8_1a6_64b, 0Xc2_4b8_b70, 0Xc7_6c5_1a3, 0Xd1_92e_819, 0Xd6_990_624, 0Xf4_0e3_585, 0X10_6aa_070, 0X19_a4c_116, 0X1e_376_c08, 0X27_487_74c, 0X34_b0b_cb5, 0X39_1c0_cb3, 0X4e_d8a_a4a, 0X5b_9cc_a4f, 0X68_2e6_ff3, 0X74_8f8_2ee, 0X78_a56_36f, 0X84_c87_814, 0X8c_c70_208, 0X90_bef_ffa, 0Xa4_506_ceb, 0Xbe_f9a_3f7, 0Xc6_717_8f2, ] __A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : bytes ): '''simple docstring''' __A = b'''\x80''' + (b'''\x00''' * (63 - (len(_lowerCamelCase ) + 8) % 64)) __A = struct.pack('''>Q''', (len(_lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' # Convert into blocks of 64 bytes __A = [ self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data ), 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __A = list(struct.unpack('''>16L''', _lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __A , __A , __A , __A , __A , __A , __A , __A = self.hashes for index in range(0, 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __A = ( self.ror(words[index - 15], 7 ) ^ self.ror(words[index - 15], 18 ) ^ (words[index - 15] >> 3) ) __A = ( self.ror(words[index - 2], 17 ) ^ self.ror(words[index - 2], 19 ) ^ (words[index - 2] >> 10) ) __A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression __A = self.ror(_lowerCamelCase, 6 ) ^ self.ror(_lowerCamelCase, 11 ) ^ self.ror(_lowerCamelCase, 25 ) __A = (e & f) ^ ((~e & 0Xff_fff_fff) & g) __A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 __A = self.ror(_lowerCamelCase, 2 ) ^ self.ror(_lowerCamelCase, 13 ) ^ self.ror(_lowerCamelCase, 22 ) __A = (a & b) ^ (a & c) ^ (b & c) __A = (sa + maj) % 0X100_000_000 __A , __A , __A , __A , __A , __A , __A , __A = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) __A = [a, b, c, d, e, f, g, h] # Modify final values __A = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes ) ] __A = ''''''.join([hex(_lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' return 0Xff_fff_fff & (value << (32 - rotations)) | (value >> rotations) class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' import 
hashlib __A = bytes('''Test String''', '''utf-8''' ) self.assertEqual(SHAaaa(_lowerCamelCase ).hash, hashlib.shaaaa(_lowerCamelCase ).hexdigest() ) def lowerCAmelCase ( ): """simple docstring""" import doctest doctest.testmod() __A = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) __A = parser.parse_args() __A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: __A = f.read() else: __A = bytes(__UpperCamelCase , '''utf-8''' ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
266
1
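A quick standard-library counterpart to the unit test above: hashlib provides the reference digest that the hand-rolled class is compared against, and the same object can hash files in chunks:

import hashlib

def sha256_hexdigest(data: bytes) -> str:
    # Reference digest from the standard library, the oracle used in the test above.
    return hashlib.sha256(data).hexdigest()

print(sha256_hexdigest(b"Test String"))

def sha256_file(path: str, chunk_size: int = 1 << 16) -> str:
    # Hash a file in chunks so large inputs do not need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()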
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : torch.FloatTensor class snake_case ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : List[Any]=3, _lowerCamelCase : List[str]=3, _lowerCamelCase : Optional[int]=("DownEncoderBlock2D",), _lowerCamelCase : List[str]=(64,), _lowerCamelCase : int=2, _lowerCamelCase : str=32, _lowerCamelCase : Optional[Any]="silu", _lowerCamelCase : Optional[int]=True, ): '''simple docstring''' super().__init__() __A = layers_per_block __A = torch.nn.Convad( _lowerCamelCase, block_out_channels[0], kernel_size=3, stride=1, padding=1, ) __A = None __A = nn.ModuleList([] ) # down __A = block_out_channels[0] for i, down_block_type in enumerate(_lowerCamelCase ): __A = output_channel __A = block_out_channels[i] __A = i == len(_lowerCamelCase ) - 1 __A = get_down_block( _lowerCamelCase, num_layers=self.layers_per_block, in_channels=_lowerCamelCase, out_channels=_lowerCamelCase, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=_lowerCamelCase, resnet_groups=_lowerCamelCase, attention_head_dim=_lowerCamelCase, temb_channels=_lowerCamelCase, ) self.down_blocks.append(_lowerCamelCase ) # mid __A = UNetMidBlockaD( in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=_lowerCamelCase, output_scale_factor=1, resnet_time_scale_shift='''default''', attention_head_dim=block_out_channels[-1], resnet_groups=_lowerCamelCase, temb_channels=_lowerCamelCase, ) # out __A = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=_lowerCamelCase, eps=1e-6 ) __A = nn.SiLU() __A = 2 * out_channels if double_z else out_channels __A = nn.Convad(block_out_channels[-1], _lowerCamelCase, 3, padding=1 ) __A = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = x __A = self.conv_in(_lowerCamelCase ) if self.training and self.gradient_checkpointing: def create_custom_forward(_lowerCamelCase : Optional[Any] ): def custom_forward(*_lowerCamelCase : Optional[Any] ): return module(*_lowerCamelCase ) return custom_forward # down if is_torch_version('''>=''', '''1.11.0''' ): for down_block in self.down_blocks: __A = torch.utils.checkpoint.checkpoint( create_custom_forward(_lowerCamelCase ), _lowerCamelCase, use_reentrant=_lowerCamelCase ) # middle __A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ), _lowerCamelCase, use_reentrant=_lowerCamelCase ) else: for down_block in self.down_blocks: __A = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowerCamelCase ), _lowerCamelCase ) # middle __A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ), _lowerCamelCase ) else: # down for down_block in self.down_blocks: __A = down_block(_lowerCamelCase ) # middle __A = self.mid_block(_lowerCamelCase ) # post-process __A = self.conv_norm_out(_lowerCamelCase ) __A = self.conv_act(_lowerCamelCase ) __A = self.conv_out(_lowerCamelCase ) return sample class snake_case ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : List[str]=3, _lowerCamelCase : List[Any]=3, _lowerCamelCase : Dict=("UpDecoderBlock2D",), _lowerCamelCase : 
List[str]=(64,), _lowerCamelCase : str=2, _lowerCamelCase : List[str]=32, _lowerCamelCase : Dict="silu", _lowerCamelCase : Optional[int]="group", ): '''simple docstring''' super().__init__() __A = layers_per_block __A = nn.Convad( _lowerCamelCase, block_out_channels[-1], kernel_size=3, stride=1, padding=1, ) __A = None __A = nn.ModuleList([] ) __A = in_channels if norm_type == '''spatial''' else None # mid __A = UNetMidBlockaD( in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=_lowerCamelCase, output_scale_factor=1, resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=_lowerCamelCase, temb_channels=_lowerCamelCase, ) # up __A = list(reversed(_lowerCamelCase ) ) __A = reversed_block_out_channels[0] for i, up_block_type in enumerate(_lowerCamelCase ): __A = output_channel __A = reversed_block_out_channels[i] __A = i == len(_lowerCamelCase ) - 1 __A = get_up_block( _lowerCamelCase, num_layers=self.layers_per_block + 1, in_channels=_lowerCamelCase, out_channels=_lowerCamelCase, prev_output_channel=_lowerCamelCase, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=_lowerCamelCase, resnet_groups=_lowerCamelCase, attention_head_dim=_lowerCamelCase, temb_channels=_lowerCamelCase, resnet_time_scale_shift=_lowerCamelCase, ) self.up_blocks.append(_lowerCamelCase ) __A = output_channel # out if norm_type == "spatial": __A = SpatialNorm(block_out_channels[0], _lowerCamelCase ) else: __A = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=_lowerCamelCase, eps=1e-6 ) __A = nn.SiLU() __A = nn.Convad(block_out_channels[0], _lowerCamelCase, 3, padding=1 ) __A = False def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=None ): '''simple docstring''' __A = z __A = self.conv_in(_lowerCamelCase ) __A = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(_lowerCamelCase : str ): def custom_forward(*_lowerCamelCase : Optional[int] ): return module(*_lowerCamelCase ) return custom_forward if is_torch_version('''>=''', '''1.11.0''' ): # middle __A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ), _lowerCamelCase, _lowerCamelCase, use_reentrant=_lowerCamelCase ) __A = sample.to(_lowerCamelCase ) # up for up_block in self.up_blocks: __A = torch.utils.checkpoint.checkpoint( create_custom_forward(_lowerCamelCase ), _lowerCamelCase, _lowerCamelCase, use_reentrant=_lowerCamelCase ) else: # middle __A = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ), _lowerCamelCase, _lowerCamelCase ) __A = sample.to(_lowerCamelCase ) # up for up_block in self.up_blocks: __A = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowerCamelCase ), _lowerCamelCase, _lowerCamelCase ) else: # middle __A = self.mid_block(_lowerCamelCase, _lowerCamelCase ) __A = sample.to(_lowerCamelCase ) # up for up_block in self.up_blocks: __A = up_block(_lowerCamelCase, _lowerCamelCase ) # post-process if latent_embeds is None: __A = self.conv_norm_out(_lowerCamelCase ) else: __A = self.conv_norm_out(_lowerCamelCase, _lowerCamelCase ) __A = self.conv_act(_lowerCamelCase ) __A = self.conv_out(_lowerCamelCase ) return sample class snake_case ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : str, _lowerCamelCase : Tuple, _lowerCamelCase : Any=None, _lowerCamelCase : Tuple="random", 
_lowerCamelCase : Dict=False, _lowerCamelCase : Any=True ): '''simple docstring''' super().__init__() __A = n_e __A = vq_embed_dim __A = beta __A = legacy __A = nn.Embedding(self.n_e, self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e ) __A = remap if self.remap is not None: self.register_buffer('''used''', torch.tensor(np.load(self.remap ) ) ) __A = self.used.shape[0] __A = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": __A = self.re_embed __A = self.re_embed + 1 print( f'Remapping {self.n_e} indices to {self.re_embed} indices. ' f'Using {self.unknown_index} for unknown indices.' ) else: __A = n_e __A = sane_index_shape def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : int ): '''simple docstring''' __A = inds.shape assert len(_lowerCamelCase ) > 1 __A = inds.reshape(ishape[0], -1 ) __A = self.used.to(_lowerCamelCase ) __A = (inds[:, :, None] == used[None, None, ...]).long() __A = match.argmax(-1 ) __A = match.sum(2 ) < 1 if self.unknown_index == "random": __A = torch.randint(0, self.re_embed, size=new[unknown].shape ).to(device=new.device ) else: __A = self.unknown_index return new.reshape(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Union[str, Any] ): '''simple docstring''' __A = inds.shape assert len(_lowerCamelCase ) > 1 __A = inds.reshape(ishape[0], -1 ) __A = self.used.to(_lowerCamelCase ) if self.re_embed > self.used.shape[0]: # extra token __A = 0 # simply set to zero __A = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, _lowerCamelCase ) return back.reshape(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Any ): '''simple docstring''' # reshape z -> (batch, height, width, channel) and flatten __A = z.permute(0, 2, 3, 1 ).contiguous() __A = z.view(-1, self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z __A = torch.argmin(torch.cdist(_lowerCamelCase, self.embedding.weight ), dim=1 ) __A = self.embedding(_lowerCamelCase ).view(z.shape ) __A = None __A = None # compute loss for embedding if not self.legacy: __A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: __A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients __A = z + (z_q - z).detach() # reshape back to match original input shape __A = z_q.permute(0, 3, 1, 2 ).contiguous() if self.remap is not None: __A = min_encoding_indices.reshape(z.shape[0], -1 ) # add batch axis __A = self.remap_to_used(_lowerCamelCase ) __A = min_encoding_indices.reshape(-1, 1 ) # flatten if self.sane_index_shape: __A = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' # shape specifying (batch, height, width, channel) if self.remap is not None: __A = indices.reshape(shape[0], -1 ) # add batch axis __A = self.unmap_to_all(_lowerCamelCase ) __A = indices.reshape(-1 ) # flatten again # get quantized latent vectors __A = self.embedding(_lowerCamelCase ) if shape is not None: __A = z_q.view(_lowerCamelCase ) # reshape back to match original input shape __A = z_q.permute(0, 3, 1, 2 ).contiguous() return z_q class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Any, _lowerCamelCase : str, _lowerCamelCase : 
List[Any]=False ): '''simple docstring''' __A = parameters __A , __A = torch.chunk(_lowerCamelCase, 2, dim=1 ) __A = torch.clamp(self.logvar, -30.0, 20.0 ) __A = deterministic __A = torch.exp(0.5 * self.logvar ) __A = torch.exp(self.logvar ) if self.deterministic: __A = __A = torch.zeros_like( self.mean, device=self.parameters.device, dtype=self.parameters.dtype ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Optional[torch.Generator] = None ): '''simple docstring''' # make sure sample is on the same device as the parameters and has same dtype __A = randn_tensor( self.mean.shape, generator=_lowerCamelCase, device=self.parameters.device, dtype=self.parameters.dtype ) __A = self.mean + self.std * sample return x def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int]=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean, 2 ) + self.var - 1.0 - self.logvar, dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean, 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar, dim=[1, 2, 3], ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : Optional[Any], _lowerCamelCase : Tuple=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) __A = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2 ) / self.var, dim=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' return self.mean
266
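The core of the vector quantizer above is a nearest-neighbour codebook lookup followed by a straight-through gradient estimator. A minimal sketch of that step, assuming a toy codebook size and embedding width (the numbers below are illustrative, not taken from the class):

```python
import torch
import torch.nn as nn

# Toy sizes for illustration only.
n_e, vq_embed_dim = 16, 4
codebook = nn.Embedding(n_e, vq_embed_dim)

z = torch.randn(2, vq_embed_dim, 8, 8)                     # (batch, channels, height, width)
z_flat = z.permute(0, 2, 3, 1).reshape(-1, vq_embed_dim)   # one row per spatial position

# Nearest codebook entry per position (same argmin-over-cdist as the forward pass above).
indices = torch.cdist(z_flat, codebook.weight).argmin(dim=1)
z_q = codebook(indices).view(2, 8, 8, vq_embed_dim).permute(0, 3, 1, 2)

# Codebook/commitment loss (legacy form, beta = 0.25) and straight-through estimator.
loss = torch.mean((z_q.detach() - z) ** 2) + 0.25 * torch.mean((z_q - z.detach()) ** 2)
z_q = z + (z_q - z).detach()                               # forward uses z_q, gradients flow to z
print(z_q.shape, loss.item())
```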
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets lowercase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' lowercase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' lowercase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ], ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str="auto", _lowerCamelCase : Union[str, Any]=-1, _lowerCamelCase : List[str]=0.9, _lowerCamelCase : int=5, _lowerCamelCase : Tuple=5_00, _lowerCamelCase : Union[str, Any]="gpt2-large", _lowerCamelCase : int=-1, _lowerCamelCase : Union[str, Any]=10_24, _lowerCamelCase : Union[str, Any]=25, _lowerCamelCase : str=5, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=25, ): '''simple docstring''' __A = compute_mauve( p_text=_lowerCamelCase, q_text=_lowerCamelCase, p_features=_lowerCamelCase, q_features=_lowerCamelCase, p_tokens=_lowerCamelCase, q_tokens=_lowerCamelCase, num_buckets=_lowerCamelCase, pca_max_data=_lowerCamelCase, kmeans_explained_var=_lowerCamelCase, kmeans_num_redo=_lowerCamelCase, kmeans_max_iter=_lowerCamelCase, featurize_model_name=_lowerCamelCase, device_id=_lowerCamelCase, max_text_length=_lowerCamelCase, divergence_curve_discretization_size=_lowerCamelCase, mauve_scaling_factor=_lowerCamelCase, verbose=_lowerCamelCase, seed=_lowerCamelCase, ) return out
266
1
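MAUVE itself comes from the `compute_mauve` call above; the divergence frontier it reports is built from KL divergences between mixtures of the two quantized text distributions. A conceptual numpy sketch of a single frontier point, with made-up histograms (this is not the official implementation):

```python
import numpy as np

# Toy cluster histograms standing in for quantized P (model text) and Q (human text).
p = np.array([0.5, 0.3, 0.2])
q = np.array([0.4, 0.4, 0.2])

def kl(a, b):
    # KL(a || b), using the convention 0 * log(0/x) = 0.
    mask = a > 0
    return float(np.sum(a[mask] * np.log(a[mask] / b[mask])))

# One point on the divergence frontier: mix P and Q, then measure both directions.
lam = 0.5
r = lam * p + (1 - lam) * q
print(kl(p, r), kl(q, r))
```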
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): raise TypeError('''Input value must be a \'int\' type''' ) return bin(__UpperCamelCase ).count('''1''' ) if __name__ == "__main__": import doctest doctest.testmod()
266
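A quick sanity check of what the bit-counting helper computes (the `int.bit_count()` alternative assumes Python 3.10 or newer):

```python
# 25 is 0b11001, so counting '1' characters in bin(25) gives 3.
assert bin(25).count("1") == 3

# Equivalent built-in on Python 3.10+:
assert (25).bit_count() == 3
```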
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowercase_ = imread(R'digital_image_processing/image_data/lena_small.jpg') lowercase_ = cvtColor(img, COLOR_BGR2GRAY) def lowerCAmelCase ( ): """simple docstring""" __A = cn.convert_to_negative(__UpperCamelCase ) # assert negative_img array for at least one True assert negative_img.any() def lowerCAmelCase ( ): """simple docstring""" with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(__UpperCamelCase , 1_1_0 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def lowerCAmelCase ( ): """simple docstring""" __A = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCAmelCase ( ): """simple docstring""" __A = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() __A = canny.canny(__UpperCamelCase ) # assert canny array for at least one True assert canny_array.any() def lowerCAmelCase ( ): """simple docstring""" assert gg.gaussian_filter(__UpperCamelCase , 5 , sigma=0.9 ).all() def lowerCAmelCase ( ): """simple docstring""" __A = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __A = conv.img_convolve(__UpperCamelCase , __UpperCamelCase ).astype(__UpperCamelCase ) assert res.any() def lowerCAmelCase ( ): """simple docstring""" assert med.median_filter(__UpperCamelCase , 3 ).any() def lowerCAmelCase ( ): """simple docstring""" __A , __A = sob.sobel_filter(__UpperCamelCase ) assert grad.any() and theta.any() def lowerCAmelCase ( ): """simple docstring""" __A = sp.make_sepia(__UpperCamelCase , 2_0 ) assert sepia.all() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ): """simple docstring""" __A = bs.Burkes(imread(__UpperCamelCase , 1 ) , 1_2_0 ) burkes.process() assert burkes.output_img.any() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ): """simple docstring""" __A = rs.NearestNeighbour(imread(__UpperCamelCase , 1 ) , 4_0_0 , 2_0_0 ) nn.process() assert nn.output.any() def lowerCAmelCase ( ): """simple docstring""" __A = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. 
__A = imread(__UpperCamelCase , 0 ) # Test for get_neighbors_pixel function() return not None __A = 0 __A = 0 __A = image[x_coordinate][y_coordinate] __A = lbp.get_neighbors_pixel( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __A = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __A = lbp.local_binary_value(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert lbp_image.any()
266
1
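Several of the tests above exercise 2-D filtering (`img_convolve` with a Laplacian-like kernel, the median and Gaussian filters, etc.). A small stand-in written directly in numpy shows the sliding-window operation those tests assume; it is not the repository's implementation:

```python
import numpy as np

def convolve2d(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    # Naive same-size sliding-window filter (cross-correlation) with zero padding.
    kh, kw = kernel.shape
    pad_h, pad_w = kh // 2, kw // 2
    padded = np.pad(image, ((pad_h, pad_h), (pad_w, pad_w)))
    out = np.zeros_like(image, dtype=float)
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            out[i, j] = np.sum(padded[i:i + kh, j:j + kw] * kernel)
    return out

laplacian = np.array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
print(convolve2d(np.eye(5), laplacian))
```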
"""simple docstring""" import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : int, *_lowerCamelCase : Tuple, **_lowerCamelCase : str ): '''simple docstring''' warnings.warn( '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use BeitImageProcessor instead.''', _lowerCamelCase, ) super().__init__(*_lowerCamelCase, **_lowerCamelCase )
266
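The file above is only a backward-compatibility shim. Migrating user code typically just swaps the class name, for example (assuming a recent transformers release and network access to fetch the checkpoint config):

```python
from transformers import BeitImageProcessor  # replaces the deprecated BeitFeatureExtractor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
# The old name still imports for now, but emits the FutureWarning shown above.
```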
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowercase_ = random.Random() if is_torch_available(): import torch def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None ): """simple docstring""" if rng is None: __A = global_rng __A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any, _lowerCamelCase : List[str], _lowerCamelCase : Any=7, _lowerCamelCase : Optional[int]=4_00, _lowerCamelCase : Optional[int]=20_00, _lowerCamelCase : Dict=1, _lowerCamelCase : Optional[Any]=0.0, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Dict=True, ): '''simple docstring''' __A = parent __A = batch_size __A = min_seq_length __A = max_seq_length __A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __A = feature_size __A = padding_value __A = sampling_rate __A = return_attention_mask __A = do_normalize def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[Any]=False, _lowerCamelCase : int=False ): '''simple docstring''' def _flatten(_lowerCamelCase : List[str] ): return list(itertools.chain(*_lowerCamelCase ) ) if equal_length: __A = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __A = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: __A = [np.asarray(_lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = ASTFeatureExtractor def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ASTFeatureExtractionTester(self ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' # Tests that all call wrap to encode_plus and batch_encode_plus __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __A = [floats_list((1, x) )[0] for x in range(8_00, 14_00, 2_00 )] __A = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __A = feat_extract(speech_inputs[0], return_tensors='''np''' ).input_values __A = feat_extract(np_speech_inputs[0], return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test batched __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test 
2-D numpy arrays are batched. __A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __A = np.asarray(_lowerCamelCase ) __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' import torch __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __A = np.random.rand(1_00 ).astype(np.floataa ) __A = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' from datasets import load_dataset __A = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech __A = ds.sort('''id''' ).select(range(_lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # fmt: off __A = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on __A = self._load_datasamples(1 ) __A = ASTFeatureExtractor() __A = feature_extractor(_lowerCamelCase, return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape, (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30], _lowerCamelCase, atol=1e-4 ) )
266
1
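Outside the test harness, the feature extractor is used roughly as follows. The snippet assumes transformers (and torchaudio, which the AST extractor needs for its mel filterbanks) are installed; the shape comment mirrors the integration test above:

```python
import numpy as np
from transformers import ASTFeatureExtractor

extractor = ASTFeatureExtractor()                    # defaults: 16 kHz, 128 mel bins, max length 1024
audio = np.random.randn(16000).astype(np.float32)    # one second of fake audio
features = extractor(audio, sampling_rate=16000, return_tensors="np")
print(features.input_values.shape)                   # expected (1, 1024, 128)
```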
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=13, _lowerCamelCase : Any=3, _lowerCamelCase : Optional[int]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Any=[0.5, 0.5, 0.5], _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = do_normalize __A = image_mean __A = image_std def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = ViTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = EfficientFormerImageProcessorTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''image_std''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_proc_tester, 
equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), )
266
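The same processor accepts PIL images and numpy arrays interchangeably, which is what the batched/non-batched tests above check. A small usage sketch (Pillow, torch and transformers installed; the dict form of `size` assumes a reasonably recent transformers version):

```python
import numpy as np
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor(size={"height": 18, "width": 18})
array = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)

from_pil = processor(Image.fromarray(array), return_tensors="pt").pixel_values
from_np = processor(array, return_tensors="pt").pixel_values
print(from_pil.shape, from_np.shape)   # both (1, 3, 18, 18)
```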
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = current_set.copy() for row_index, row in enumerate(__UpperCamelCase ): __A = row[0] for column_index, column in enumerate(__UpperCamelCase ): if magnitude == 0: __A = column continue __A = column / magnitude # Subtract to cancel term __A = current_set[0] __A = [first_row] __A = current_set[1::] for row in current_set: __A = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__UpperCamelCase ) continue for column_index in range(len(__UpperCamelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__UpperCamelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: __A = final_set[0] __A = [] __A = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) __A = simplify(__UpperCamelCase ) for i in range(len(__UpperCamelCase ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , __UpperCamelCase ) __A = resultant return final_set def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if len(__UpperCamelCase ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) __A = len(__UpperCamelCase ) + 1 if any(len(__UpperCamelCase ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(__UpperCamelCase , (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(__UpperCamelCase ) == 1: return [equations[0][-1] / equations[0][0]] __A = equations.copy() if any(0 in row for row in data_set ): __A = data_set.copy() __A = [] for row_index, row in enumerate(__UpperCamelCase ): if 0 not in row: __A = data_set.pop(__UpperCamelCase ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0 , __UpperCamelCase ) __A = data_set.copy() __A = simplify(__UpperCamelCase ) __A = simplified[::-1] __A = [] for row in simplified: __A = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue __A = row.copy()[: len(__UpperCamelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__UpperCamelCase ) == 0: solutions.append(0 ) continue __A = temp_row[1::] __A = temp_row[::-1] for column_index, column in enumerate(__UpperCamelCase ): current_solution -= column * solutions[column_index] solutions.append(__UpperCamelCase ) __A = [] for item in solutions: final.append(float(round(__UpperCamelCase , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowercase_ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
266
1
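The elimination routine above consumes augmented rows `[coefficients..., constant]`. A tiny cross-check of the kind of answer it should produce, done with numpy rather than the function itself:

```python
import numpy as np

# 2x + 1y = 4
# 1x + 2y = 5
equations = [[2, 1, 4], [1, 2, 5]]
a = np.array([row[:-1] for row in equations], dtype=float)
b = np.array([row[-1] for row in equations], dtype=float)
print(np.linalg.solve(a, b))   # [1. 2.]  ->  x = 1, y = 2
```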
"""simple docstring""" import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case : '''simple docstring''' def __init__( self : Any, _lowerCamelCase : List[str], _lowerCamelCase : Optional[Any]=13, _lowerCamelCase : Dict=7, _lowerCamelCase : Tuple=True, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : List[Any]=99, _lowerCamelCase : Any=32, _lowerCamelCase : Optional[int]=5, _lowerCamelCase : List[Any]=4, _lowerCamelCase : Dict=37, _lowerCamelCase : Union[str, Any]="gelu", _lowerCamelCase : str=0.1, _lowerCamelCase : Optional[Any]=0.1, _lowerCamelCase : Tuple=5_12, _lowerCamelCase : List[str]=16, _lowerCamelCase : List[str]=2, _lowerCamelCase : Dict=0.02, _lowerCamelCase : str=3, _lowerCamelCase : Dict=4, _lowerCamelCase : Union[str, Any]=None, ): '''simple docstring''' __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_input_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_labels __A = num_choices __A = scope def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __A = None if self.use_input_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) __A = None __A = None __A = None if self.use_labels: __A = ids_tensor([self.batch_size], self.type_sequence_label_size ) __A = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) __A = ids_tensor([self.batch_size], self.num_choices ) __A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' return NystromformerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, ) def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : str, _lowerCamelCase : Optional[int], _lowerCamelCase : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : str, _lowerCamelCase : 
List[Any], _lowerCamelCase : Any ): '''simple docstring''' __A = NystromformerModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase ) __A = model(_lowerCamelCase, token_type_ids=_lowerCamelCase ) __A = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : Optional[Any], _lowerCamelCase : Optional[Any], _lowerCamelCase : Any, _lowerCamelCase : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : Tuple ): '''simple docstring''' __A = NystromformerForMaskedLM(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Optional[int], _lowerCamelCase : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Optional[int], _lowerCamelCase : List[Any], _lowerCamelCase : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' __A = NystromformerForQuestionAnswering(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() __A = model( _lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, start_positions=_lowerCamelCase, end_positions=_lowerCamelCase, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : Optional[int], _lowerCamelCase : Union[str, Any], _lowerCamelCase : Any, _lowerCamelCase : List[Any], _lowerCamelCase : Any, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Optional[int] ): '''simple docstring''' __A = self.num_labels __A = NystromformerForSequenceClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : str, _lowerCamelCase : List[str], _lowerCamelCase : str, _lowerCamelCase : Dict, _lowerCamelCase : Any, _lowerCamelCase : Any, _lowerCamelCase : Any ): '''simple docstring''' __A = self.num_labels __A = NystromformerForTokenClassification(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : str, _lowerCamelCase : Optional[int], _lowerCamelCase : Tuple, _lowerCamelCase : List[str], _lowerCamelCase : List[Any], _lowerCamelCase : Optional[int] ): '''simple docstring''' __A = self.num_choices __A = NystromformerForMultipleChoice(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() __A = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() __A = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() __A = 
input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() __A = model( _lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = self.prepare_config_and_inputs() ( ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ) = config_and_inputs __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class snake_case ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) A_ : Union[str, Any] = ( { "feature-extraction": NystromformerModel, "fill-mask": NystromformerForMaskedLM, "question-answering": NystromformerForQuestionAnswering, "text-classification": NystromformerForSequenceClassification, "token-classification": NystromformerForTokenClassification, "zero-shot": NystromformerForSequenceClassification, } if is_torch_available() else {} ) A_ : Any = False A_ : Optional[int] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = NystromformerModelTester(self ) __A = ConfigTester(self, config_class=_lowerCamelCase, hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __A = type self.model_tester.create_and_check_model(*_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A = NystromformerModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) @require_torch class snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = 
NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' ) __A = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): __A = model(_lowerCamelCase )[0] __A = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape, _lowerCamelCase ) __A = torch.tensor( [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) ) @slow def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = '''the [MASK] of Belgium is Brussels''' __A = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' ) __A = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' ) __A = tokenizer(_lowerCamelCase, return_tensors='''pt''' ) with torch.no_grad(): __A = model(encoding.input_ids ).logits __A = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(_lowerCamelCase ), '''capital''' )
266
"""simple docstring""" from __future__ import annotations from typing import Any def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not postfix_notation: return 0 __A = {'''+''', '''-''', '''*''', '''/'''} __A = [] for token in postfix_notation: if token in operations: __A , __A = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(__UpperCamelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
266
1
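Tracing the evaluator above on a short expression makes the stack discipline clear; the floor-division branch is kept identical to the file:

```python
tokens = ["2", "3", "+", "4", "*"]        # postfix for (2 + 3) * 4
stack = []
for token in tokens:
    if token in {"+", "-", "*", "/"}:
        b, a = stack.pop(), stack.pop()   # right operand is popped first
        if token == "+":
            stack.append(a + b)
        elif token == "-":
            stack.append(a - b)
        elif token == "*":
            stack.append(a * b)
        else:
            stack.append(a // b if a * b >= 0 or a % b == 0 else a // b + 1)
    else:
        stack.append(int(token))
print(stack.pop())                        # 20
```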
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Tuple, _lowerCamelCase : Optional[int], _lowerCamelCase : int ): '''simple docstring''' super().__init__() self.register_modules(unet=_lowerCamelCase, scheduler=_lowerCamelCase ) @torch.no_grad() def __call__( self : str, _lowerCamelCase : int = 1, _lowerCamelCase : int = 1_00, _lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None, _lowerCamelCase : Optional[float] = None, _lowerCamelCase : bool = True, ): '''simple docstring''' if audio_length_in_s is None: __A = self.unet.config.sample_size / self.unet.config.sample_rate __A = audio_length_in_s * self.unet.config.sample_rate __A = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to' f' {3 * down_scale_factor / self.unet.config.sample_rate}.' ) __A = int(_lowerCamelCase ) if sample_size % down_scale_factor != 0: __A = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled' f' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising' ''' process.''' ) __A = int(_lowerCamelCase ) __A = next(iter(self.unet.parameters() ) ).dtype __A = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(_lowerCamelCase, _lowerCamelCase ) and len(_lowerCamelCase ) != batch_size: raise ValueError( f'You have passed a list of generators of length {len(_lowerCamelCase )}, but requested an effective batch' f' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) __A = randn_tensor(_lowerCamelCase, generator=_lowerCamelCase, device=self.device, dtype=_lowerCamelCase ) # set step values self.scheduler.set_timesteps(_lowerCamelCase, device=audio.device ) __A = self.scheduler.timesteps.to(_lowerCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __A = self.unet(_lowerCamelCase, _lowerCamelCase ).sample # 2. compute previous image: x_t -> t_t-1 __A = self.scheduler.step(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ).prev_sample __A = audio.clamp(-1, 1 ).float().cpu().numpy() __A = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=_lowerCamelCase )
266
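The length handling in `__call__` above rounds the requested duration up to a multiple of 2**len(up_blocks) before denoising, then cuts the result back. A worked instance with illustrative numbers (a 16 kHz model with four up-blocks):

```python
sample_rate = 16000
num_up_blocks = 4
down_scale_factor = 2 ** num_up_blocks        # 16

sample_size = 16008                           # e.g. int(1.0005 s * 16 kHz), not a multiple of 16
if sample_size % down_scale_factor != 0:
    sample_size = (sample_size // down_scale_factor + 1) * down_scale_factor
print(sample_size, sample_size / sample_rate) # 16016 1.001 -> the extra samples are trimmed afterwards
```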
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple, _lowerCamelCase : List[str]=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : int=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : int=99, _lowerCamelCase : Optional[int]=32, _lowerCamelCase : Tuple=5, _lowerCamelCase : Tuple=4, _lowerCamelCase : str=37, _lowerCamelCase : Union[str, Any]="gelu", _lowerCamelCase : int=0.1, _lowerCamelCase : List[Any]=0.1, _lowerCamelCase : Dict=5_12, _lowerCamelCase : List[Any]=16, _lowerCamelCase : Any=2, _lowerCamelCase : Any=0.02, _lowerCamelCase : Dict=4, ): '''simple docstring''' __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_attention_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_choices def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __A = None if self.use_attention_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) __A = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self.prepare_config_and_inputs() __A , __A , __A , __A = config_and_inputs __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Dict = True A_ : Tuple = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = FlaxRoFormerModelTester(self ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: 
__A = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''', from_pt=_lowerCamelCase ) __A = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCamelCase ) @require_flax class snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __A = jnp.array([[0, 1, 2, 3, 4, 5]] ) __A = model(_lowerCamelCase )[0] __A = 5_00_00 __A = (1, 6, vocab_size) self.assertEqual(output.shape, _lowerCamelCase ) __A = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
266
1
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : torch.FloatTensor A_ : torch.FloatTensor class snake_case ( _lowerCAmelCase , _lowerCAmelCase ): '''simple docstring''' A_ : List[str] = 1 @register_to_config def __init__( self : str, _lowerCamelCase : int = 20_00, _lowerCamelCase : float = 0.15, _lowerCamelCase : float = 0.01, _lowerCamelCase : float = 13_48.0, _lowerCamelCase : float = 1e-5, _lowerCamelCase : int = 1, ): '''simple docstring''' # standard deviation of the initial noise distribution __A = sigma_max # setable values __A = None self.set_sigmas(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : Optional[int] = None ): '''simple docstring''' return sample def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : int, _lowerCamelCase : float = None, _lowerCamelCase : Union[str, torch.device] = None ): '''simple docstring''' __A = sampling_eps if sampling_eps is not None else self.config.sampling_eps __A = torch.linspace(1, _lowerCamelCase, _lowerCamelCase, device=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int, _lowerCamelCase : float = None, _lowerCamelCase : float = None, _lowerCamelCase : float = None ): '''simple docstring''' __A = sigma_min if sigma_min is not None else self.config.sigma_min __A = sigma_max if sigma_max is not None else self.config.sigma_max __A = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(_lowerCamelCase, _lowerCamelCase ) __A = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) __A = torch.exp(torch.linspace(math.log(_lowerCamelCase ), math.log(_lowerCamelCase ), _lowerCamelCase ) ) __A = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' return torch.where( timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), ) def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : int, _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : Optional[torch.Generator] = None, _lowerCamelCase : bool = True, ): '''simple docstring''' if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) __A = timestep * torch.ones( sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) __A = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda __A = timesteps.to(self.discrete_sigmas.device ) __A = self.discrete_sigmas[timesteps].to(sample.device ) __A = self.get_adjacent_sigma(_lowerCamelCase, _lowerCamelCase ).to(sample.device ) __A = torch.zeros_like(_lowerCamelCase ) __A = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output 
modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods __A = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): __A = diffusion.unsqueeze(-1 ) __A = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of __A = randn_tensor( sample.shape, layout=sample.layout, generator=_lowerCamelCase, device=sample.device, dtype=sample.dtype ) __A = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? __A = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=_lowerCamelCase, prev_sample_mean=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : Optional[torch.Generator] = None, _lowerCamelCase : bool = True, ): '''simple docstring''' if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction __A = randn_tensor(sample.shape, layout=sample.layout, generator=_lowerCamelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr __A = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean() __A = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean() __A = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 __A = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term __A = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): __A = step_size.unsqueeze(-1 ) __A = sample + step_size * model_output __A = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : torch.FloatTensor, ): '''simple docstring''' # Make sure sigmas and timesteps have the same device and dtype as original_samples __A = timesteps.to(original_samples.device ) __A = self.discrete_sigmas.to(original_samples.device )[timesteps] __A = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(_lowerCamelCase ) * sigmas[:, None, None, None] ) __A = noise + original_samples return noisy_samples def __len__( self : Optional[Any] ): '''simple docstring''' return self.config.num_train_timesteps
266
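The sigma schedule configured in `set_sigmas` interpolates geometrically between `sigma_min` and `sigma_max` along the timesteps. A tiny reproduction with the scheduler's default values:

```python
import torch

sigma_min, sigma_max, num_steps, sampling_eps = 0.01, 1348.0, 5, 1e-5
timesteps = torch.linspace(1, sampling_eps, num_steps)      # as in set_timesteps
sigmas = sigma_min * (sigma_max / sigma_min) ** timesteps   # as in set_sigmas
print(sigmas)   # starts near sigma_max at t = 1 and decays toward sigma_min as t -> 0
```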
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def lowerCAmelCase ( __UpperCamelCase = 1_0_0_0_0_0_0 , __UpperCamelCase = 1_0 ): """simple docstring""" __A = defaultdict(__UpperCamelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __A = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __A = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__UpperCamelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 1_0 ) if __name__ == "__main__": print(F'''{solution() = }''')
266
1
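The counting above relies on each hollow square lamina using `outer_width**2 - hole_width**2` tiles, with the hole sharing the outer width's parity. Two hand-checked cases:

```python
# 3x3 ring with a 1x1 hole: the smallest lamina, using 8 tiles.
assert 3 ** 2 - 1 ** 2 == 8
# 4x4 ring with a 2x2 hole: 12 tiles; both counts are achievable in exactly one way.
assert 4 ** 2 - 2 ** 2 == 12
```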
"""simple docstring""" lowercase_ = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' lowercase_ = [{'type': 'code', 'content': INSTALL_CONTENT}] lowercase_ = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
266
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : Optional[int]=2, _lowerCamelCase : Optional[int]=3, _lowerCamelCase : int=64, _lowerCamelCase : List[str]=None ): '''simple docstring''' __A = np.random.default_rng(_lowerCamelCase ) __A = length __A = rng.normal(size=(length,) ).astype(np.floataa ) __A = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa ) def __len__( self : str ): '''simple docstring''' return self.length def __getitem__( self : Dict, _lowerCamelCase : Optional[int] ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple=0, _lowerCamelCase : Any=0, _lowerCamelCase : Optional[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[Any]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a[0] + self.b[0] class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : str, _lowerCamelCase : Optional[Any]=0, _lowerCamelCase : Any=0, _lowerCamelCase : List[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a + self.b def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1_6 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __A = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __A = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} __A = load_dataset('''csv''' , data_files=__UpperCamelCase ) __A = datasets['''train'''].unique('''label''' ) __A = {v: i for i, v in enumerate(__UpperCamelCase )} def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) if "label" in examples: __A = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __A = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
__A = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=2 ) __A = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
266
1
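The `collate_fn` above pads to a fixed 128 tokens on TPU (so compiled shapes stay static) and to the longest sequence in the batch elsewhere. A toy illustration of the two resulting shapes, without the tokenizer:

```python
batch = [[101, 2054, 102], [101, 2054, 2003, 1037, 102]]   # two fake token-id sequences

def pad(seqs, length, pad_id=0):
    return [s + [pad_id] * (length - len(s)) for s in seqs]

longest = pad(batch, max(len(s) for s in batch))           # (2, 5)   - dynamic shape, fine on GPU/CPU
fixed = pad(batch, 128)                                    # (2, 128) - static shape, preferred on TPU
print(len(longest[0]), len(fixed[0]))
```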
import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor UpperCAmelCase__ = logging.get_logger(__name__) class lowercase_ ( lowercase ): '''simple docstring''' def __init__( self : Optional[int] , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any ) ->None: """simple docstring""" warnings.warn( '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PerceiverImageProcessor instead.''' , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ), } ), ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[List[List[str]]], _lowerCamelCase : List[List[str]], _lowerCamelCase : int = 1, _lowerCamelCase : int = 4, ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCamelCase, hypotheses=_lowerCamelCase, min_len=_lowerCamelCase, max_len=_lowerCamelCase ) }
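# A small, self-contained companion to the metric above (assumption: nltk is
# installed); it calls nltk's corpus_gleu directly with the same arguments the
# metric forwards.
def _example_corpus_gleu():
    from nltk.translate import gleu_score

    hypothesis = ["he", "read", "the", "book", "because", "he", "was", "interested", "in", "world", "history"]
    reference = ["he", "was", "interested", "in", "world", "history", "because", "he", "read", "the", "book"]
    # corpus_gleu expects a list of reference-lists parallel to the list of hypotheses.
    return gleu_score.corpus_gleu([[reference]], [hypothesis], min_len=1, max_len=4)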
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def lowerCAmelCase_ ( snake_case_ : int ) -> List[str]: '''simple docstring''' if "img_encoder.pos_embed" in name: UpperCAmelCase_ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" ) if "img_encoder.patch_embed.proj" in name: UpperCAmelCase_ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" ) if "img_encoder.patch_embed.norm" in name: UpperCAmelCase_ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" ) if "img_encoder.layers" in name: UpperCAmelCase_ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" ) if "blocks" in name and "res" not in name: UpperCAmelCase_ = name.replace("blocks" , "layers" ) if "attn" in name and "pre_assign" not in name: UpperCAmelCase_ = name.replace("attn" , "self_attn" ) if "proj" in name and "self_attn" in name and "text" not in name: UpperCAmelCase_ = name.replace("proj" , "out_proj" ) if "pre_assign_attn.attn.proj" in name: UpperCAmelCase_ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" ) if "norm1" in name: UpperCAmelCase_ = name.replace("norm1" , "layer_norm1" ) if "norm2" in name and "pre_assign" not in name: UpperCAmelCase_ = name.replace("norm2" , "layer_norm2" ) if "img_encoder.norm" in name: UpperCAmelCase_ = name.replace("img_encoder.norm" , "vision_model.layernorm" ) # text encoder if "text_encoder.token_embedding" in name: UpperCAmelCase_ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" ) if "text_encoder.positional_embedding" in name: UpperCAmelCase_ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" ) if "text_encoder.transformer.resblocks." in name: UpperCAmelCase_ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." ) if "ln_1" in name: UpperCAmelCase_ = name.replace("ln_1" , "layer_norm1" ) if "ln_2" in name: UpperCAmelCase_ = name.replace("ln_2" , "layer_norm2" ) if "c_fc" in name: UpperCAmelCase_ = name.replace("c_fc" , "fc1" ) if "c_proj" in name: UpperCAmelCase_ = name.replace("c_proj" , "fc2" ) if "text_encoder" in name: UpperCAmelCase_ = name.replace("text_encoder" , "text_model" ) if "ln_final" in name: UpperCAmelCase_ = name.replace("ln_final" , "final_layer_norm" ) # projection layers if "img_projector.linear_hidden." in name: UpperCAmelCase_ = name.replace("img_projector.linear_hidden." , "visual_projection." ) if "img_projector.linear_out." in name: UpperCAmelCase_ = name.replace("img_projector.linear_out." , "visual_projection.3." ) if "text_projector.linear_hidden" in name: UpperCAmelCase_ = name.replace("text_projector.linear_hidden" , "text_projection" ) if "text_projector.linear_out" in name: UpperCAmelCase_ = name.replace("text_projector.linear_out" , "text_projection.3" ) return name def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : List[str] ) -> Union[str, Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase_ = orig_state_dict.pop(snake_case_ ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors UpperCAmelCase_ = key.split("." 
) UpperCAmelCase_ , UpperCAmelCase_ = int(key_split[2] ), int(key_split[4] ) UpperCAmelCase_ = config.vision_config.hidden_size if "weight" in key: UpperCAmelCase_ = val[:dim, :] UpperCAmelCase_ = val[dim : dim * 2, :] UpperCAmelCase_ = val[-dim:, :] else: UpperCAmelCase_ = val[:dim] UpperCAmelCase_ = val[dim : dim * 2] UpperCAmelCase_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors UpperCAmelCase_ = key.split("." ) UpperCAmelCase_ = int(key_split[3] ) UpperCAmelCase_ = config.text_config.hidden_size if "weight" in key: UpperCAmelCase_ = val[:dim, :] UpperCAmelCase_ = val[ dim : dim * 2, : ] UpperCAmelCase_ = val[-dim:, :] else: UpperCAmelCase_ = val[:dim] UpperCAmelCase_ = val[dim : dim * 2] UpperCAmelCase_ = val[-dim:] else: UpperCAmelCase_ = rename_key(snake_case_ ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): UpperCAmelCase_ = val.squeeze_() else: UpperCAmelCase_ = val return orig_state_dict def lowerCAmelCase_ ( ) -> Any: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Optional[Any]="groupvit-gcc-yfcc" , snake_case_ : List[str]=False ) -> Any: '''simple docstring''' UpperCAmelCase_ = GroupViTConfig() UpperCAmelCase_ = GroupViTModel(snake_case_ ).eval() UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )["model"] UpperCAmelCase_ = convert_state_dict(snake_case_ , snake_case_ ) UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(snake_case_ , strict=snake_case_ ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(snake_case_ ) == 0) # verify result UpperCAmelCase_ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = processor(text=["a photo of a cat", "a photo of a dog"] , images=snake_case_ , padding=snake_case_ , return_tensors="pt" ) with torch.no_grad(): UpperCAmelCase_ = model(**snake_case_ ) if model_name == "groupvit-gcc-yfcc": UpperCAmelCase_ = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": UpperCAmelCase_ = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(f"""Model name {model_name} not supported.""" ) assert torch.allclose(outputs.logits_per_image , snake_case_ , atol=1E-3 ) processor.save_pretrained(snake_case_ ) model.save_pretrained(snake_case_ ) print("Successfully saved processor and model to" , snake_case_ ) if push_to_hub: print("Pushing to the hub..." ) processor.push_to_hub(snake_case_ , organization="nielsr" ) model.push_to_hub(snake_case_ , organization="nielsr" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Dict =argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. 
Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) SCREAMING_SNAKE_CASE_: Optional[int] =parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
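# Sketch of the qkv-splitting step performed in convert_state_dict above: a fused
# projection weight of shape (3 * hidden, hidden) is sliced into query, key and
# value matrices (shapes only; the tensor values here are illustrative).
def _example_split_qkv():
    import torch

    hidden_size = 4
    qkv_weight = torch.randn(3 * hidden_size, hidden_size)
    q = qkv_weight[:hidden_size, :]
    k = qkv_weight[hidden_size : hidden_size * 2, :]
    v = qkv_weight[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)
    return q, k, v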
"""simple docstring""" class snake_case : '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : list[int] ): '''simple docstring''' __A = len(_lowerCamelCase ) __A = [0] * len_array if len_array > 0: __A = array[0] for i in range(1, _lowerCamelCase ): __A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int ): '''simple docstring''' __A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(_lowerCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
"""Project Euler problem 3: find the largest prime factor of a number."""


def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n using trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
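# Known reference values for the trial-division solution above:
# 13195 = 5 * 7 * 13 * 29, and the Project Euler default input factors to 6857.
def _example_largest_prime_factor_checks():
    assert solution(13_195) == 29
    assert solution(600_851_475_143) == 6857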
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowercase_ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowercase_ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase ( ): """simple docstring""" __A = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __A = bs[:] __A = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCamelCase ) cs.append(2**8 + n ) n += 1 __A = [chr(__UpperCamelCase ) for n in cs] return dict(zip(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = set() __A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __A = char return pairs class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = VOCAB_FILES_NAMES A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : Dict="replace", _lowerCamelCase : Any="<s>", _lowerCamelCase : Optional[int]="</s>", _lowerCamelCase : Dict="</s>", _lowerCamelCase : List[Any]="<s>", _lowerCamelCase : List[str]="<unk>", _lowerCamelCase : str="<pad>", _lowerCamelCase : Any="<mask>", _lowerCamelCase : Any=False, **_lowerCamelCase : Tuple, ): '''simple docstring''' __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else bos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else eos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else sep_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else cls_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token super().__init__( errors=_lowerCamelCase, bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, add_prefix_space=_lowerCamelCase, **_lowerCamelCase, ) with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) __A = {v: k for k, v in self.encoder.items()} __A = errors # how to handle errors in decoding __A = bytes_to_unicode() __A = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCamelCase, encoding='''utf-8''' ) as merges_handle: __A = merges_handle.read().split('''\n''' )[1:-1] __A = [tuple(merge.split() ) for merge in bpe_merges] __A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) ) __A = {} __A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder, **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] __A = tuple(_lowerCamelCase ) __A = get_pairs(_lowerCamelCase ) if not pairs: return token while True: __A = min(_lowerCamelCase, key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __A , __A = bigram __A = [] __A = 0 while i < len(_lowerCamelCase ): try: __A = word.index(_lowerCamelCase, _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __A = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __A = tuple(_lowerCamelCase ) __A = new_word if len(_lowerCamelCase ) == 1: break else: __A = get_pairs(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = word return word def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Dict ): '''simple docstring''' __A = [] for token in re.findall(self.pat, _lowerCamelCase ): __A = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) ) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' return self.encoder.get(_lowerCamelCase, self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ): '''simple docstring''' return self.decoder.get(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = ''''''.join(_lowerCamelCase ) __A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors ) return 
text def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowerCamelCase, ensure_ascii=_lowerCamelCase ) + '''\n''' ) __A = 0 with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ''' Please check that the tokenizer is not corrupted!''' ) __A = token_index writer.write(''' '''.join(_lowerCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str]=False, **_lowerCamelCase : List[Any] ): '''simple docstring''' __A = kwargs.pop('''add_prefix_space''', self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()): __A = ''' ''' + text return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ): '''simple docstring''' __A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __A = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
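# Usage sketch (assumption: network access to the Hugging Face Hub): the byte-level
# BPE tokenizer above is exposed as BlenderbotTokenizer; Blenderbot appends </s>
# to the encoded ids rather than wrapping the sequence in <s> ... </s>.
def _example_blenderbot_tokenize():
    from transformers import BlenderbotTokenizer

    tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
    encoded = tokenizer(" Hello world")
    return encoded.input_ids  # ends with tokenizer.eos_token_id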
"""Send a message to a Slack channel via an incoming-webhook URL."""
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging lowercase_ = ( 'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py' ) lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCAmelCase ( ): """simple docstring""" __A = '''https://pypi.org/pypi/diffusers/json''' __A = json.loads(request.urlopen(__UpperCamelCase ).read() )['''releases'''].keys() return sorted(__UpperCamelCase , key=lambda __UpperCamelCase : version.Version(__UpperCamelCase ) ) def lowerCAmelCase ( ): """simple docstring""" if HF_MODULES_CACHE in sys.path: return sys.path.append(__UpperCamelCase ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = Path(__UpperCamelCase ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" init_hf_modules() __A = Path(__UpperCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import .xxx` __A = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = False __A = [module_file] __A = [] # Let's recurse through all relative imports while not no_change: __A = [] for f in files_to_check: new_imports.extend(get_relative_imports(__UpperCamelCase ) ) __A = Path(__UpperCamelCase ).parent __A = [str(module_path / m ) for m in new_imports] __A = [f for f in new_import_files if f not in all_relative_imports] __A = [f'{f}.py' for f in new_import_files] __A = len(__UpperCamelCase ) == 0 all_relative_imports.extend(__UpperCamelCase ) return all_relative_imports def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import xxx` __A = re.findall('''^\s*import\s+(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Only keep the top-level module __A = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all __A = list(set(__UpperCamelCase ) ) __A = [] for imp in imports: try: importlib.import_module(__UpperCamelCase ) except ImportError: missing_packages.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' f'{", 
".join(__UpperCamelCase )}. Run `pip install {" ".join(__UpperCamelCase )}`' ) return get_relative_imports(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = module_path.replace(os.path.sep , '''.''' ) __A = importlib.import_module(__UpperCamelCase ) if class_name is None: return find_pipeline_class(__UpperCamelCase ) return getattr(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" from ..pipelines import DiffusionPipeline __A = dict(inspect.getmembers(__UpperCamelCase , inspect.isclass ) ) __A = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , __UpperCamelCase ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' f' {loaded_module}.' ) __A = cls return pipeline_class def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ): """simple docstring""" __A = str(__UpperCamelCase ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): __A = module_file_or_url __A = '''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: __A = get_diffusers_versions() # cut ".dev0" __A = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: __A = latest_version if latest_version[1:] in available_versions else '''main''' logger.info(f'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: __A = f'v{revision}' elif revision == "main": __A = revision else: raise ValueError( f'`custom_revision`: {revision} does not exist. Please make sure to choose one of' f' {", ".join(available_versions + ["main"] )}.' ) # community pipeline on GitHub __A = COMMUNITY_PIPELINES_URL.format(revision=__UpperCamelCase , pipeline=__UpperCamelCase ) try: __A = cached_download( __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = '''git''' __A = pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise else: try: # Load from URL or cache if already cached __A = hf_hub_download( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment __A = check_imports(__UpperCamelCase ) # Now we move the module inside our cached dynamic modules. 
__A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(__UpperCamelCase ) __A = Path(__UpperCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(__UpperCamelCase , submodule_path / module_file ) for module_needed in modules_needed: __A = f'{module_needed}.py' shutil.copy(os.path.join(__UpperCamelCase , __UpperCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(__UpperCamelCase , __UpperCamelCase ): __A = use_auth_token elif use_auth_token is True: __A = HfFolder.get_token() else: __A = None __A = model_info(__UpperCamelCase , revision=__UpperCamelCase , token=__UpperCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. __A = submodule_path / commit_hash __A = full_submodule + os.path.sep + commit_hash create_dynamic_module(__UpperCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(__UpperCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( __UpperCamelCase , f'{module_needed}.py' , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return os.path.join(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , **__UpperCamelCase , ): """simple docstring""" __A = get_cached_module_file( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return get_class_in_module(__UpperCamelCase , final_module.replace('''.py''' , '''''' ) )
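# Usage sketch (assumption: diffusers is installed and hub access is available):
# these helpers back the `custom_pipeline` argument of DiffusionPipeline.from_pretrained,
# which fetches a community pipeline file through get_cached_module_file above.
def _example_load_community_pipeline():
    from diffusers import DiffusionPipeline

    return DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="lpw_stable_diffusion",
    )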
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class UpperCAmelCase_ ( unittest.TestCase ): @parameterized.expand([(None,), ('foo.json',)] ) def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Optional[Any] ) -> Tuple: lowerCAmelCase = GenerationConfig( do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase__ , config_name=UpperCAmelCase__ ) lowerCAmelCase = GenerationConfig.from_pretrained(UpperCAmelCase__ , config_name=UpperCAmelCase__ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , UpperCAmelCase__ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 5_0 ) self.assertEqual(loaded_config.max_length , 2_0 ) self.assertEqual(loaded_config.max_time , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: lowerCAmelCase = AutoConfig.from_pretrained('gpt2' ) lowerCAmelCase = GenerationConfig.from_model_config(UpperCAmelCase__ ) lowerCAmelCase = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def __UpperCAmelCase ( self : str ) -> Dict: lowerCAmelCase = GenerationConfig() lowerCAmelCase = { 'max_new_tokens': 1_0_2_4, 'foo': 'bar', } lowerCAmelCase = copy.deepcopy(UpperCAmelCase__ ) lowerCAmelCase = generation_config.update(**UpperCAmelCase__ ) # update_kwargs was not modified (no side effects) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(UpperCAmelCase__ , {'foo': 'bar'} ) def __UpperCAmelCase ( self : int ) -> Any: lowerCAmelCase = GenerationConfig() lowerCAmelCase = 'bar' with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir: generation_config.save_pretrained(UpperCAmelCase__ ) lowerCAmelCase = GenerationConfig.from_pretrained(UpperCAmelCase__ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , 'bar' ) lowerCAmelCase = GenerationConfig.from_model_config(UpperCAmelCase__ ) assert not hasattr(UpperCAmelCase__ , 'foo' ) # no new kwargs should be initialized if from config def __UpperCAmelCase ( self : int ) -> int: lowerCAmelCase = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , UpperCAmelCase__ ) self.assertEqual(default_config.num_beams , 1 ) lowerCAmelCase = GenerationConfig( do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) 
self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , UpperCAmelCase__ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase__ ) lowerCAmelCase = GenerationConfig.from_pretrained(UpperCAmelCase__ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , UpperCAmelCase__ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class UpperCAmelCase_ ( unittest.TestCase ): @classmethod def __UpperCAmelCase ( cls : Dict ) -> List[str]: lowerCAmelCase = TOKEN HfFolder.save_token(UpperCAmelCase__ ) @classmethod def __UpperCAmelCase ( cls : str ) -> str: try: delete_repo(token=cls._token , repo_id='test-generation-config' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' ) except HTTPError: pass def __UpperCAmelCase ( self : int ) -> List[Any]: lowerCAmelCase = GenerationConfig( do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('test-generation-config' , use_auth_token=self._token ) lowerCAmelCase = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='test-generation-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( UpperCAmelCase__ , repo_id='test-generation-config' , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token ) lowerCAmelCase = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) def __UpperCAmelCase ( self : Dict ) -> Optional[int]: lowerCAmelCase = GenerationConfig( do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token ) lowerCAmelCase = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( UpperCAmelCase__ , repo_id='valid_org/test-generation-config-org' , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token ) lowerCAmelCase = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
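# A small runnable companion to the round-trip tests above (local filesystem only;
# assumption: a transformers version that ships GenerationConfig).
def _example_generation_config_roundtrip():
    import tempfile

    from transformers import GenerationConfig

    config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        loaded = GenerationConfig.from_pretrained(tmp_dir)
    assert loaded.do_sample is True and loaded.temperature == 0.7
    return loaded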
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ): __A = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sgugger/tiny-distilbert-classification''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) # set architectures equal to `None` __A = None __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], 
training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, save_to_csv=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ), 
inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ), env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ), multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ): self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ), log_print=_lowerCamelCase, trace_memory_line_by_line=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
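# Minimal benchmark invocation mirroring the tests above (assumption: a transformers
# version where the PyTorch benchmark utilities are still available; they are deprecated).
def _example_benchmark():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

    args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    return PyTorchBenchmark(args).run()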
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around tqdm.auto.tqdm that optionally displays only on the local main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
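# Usage sketch for the wrapper above: under `accelerate launch`, only the local main
# process renders the progress bar; other ranks receive a disabled bar.
def _example_progress_loop():
    for _ in tqdm(True, range(10)):  # first positional argument is main_process_only
        pass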
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = PegasusTokenizer A_ : int = PegasusTokenizerFast A_ : Optional[Any] = True A_ : Union[str, Any] = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def _SCREAMING_SNAKE_CASE ( self : int, **_lowerCamelCase : List[Any] ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = '''</s>''' __A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ), _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<pad>''' ) self.assertEqual(vocab_keys[1], '''</s>''' ) self.assertEqual(vocab_keys[-1], '''v''' ) self.assertEqual(len(_lowerCamelCase ), 11_03 ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 11_03 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word __A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' __A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_61_03 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_03 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05 assert tokenizer.unk_token == 
"<unk>" assert tokenizer.model_max_length == 10_24 __A = '''To ensure a smooth flow of bank resolutions.''' __A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 1_50, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 10_24) assert batch.attention_mask.shape == (2, 10_24) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # fmt: off __A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase, model_name='''google/bigbird-pegasus-large-arxiv''', revision='''ba85d0851d708441f91440d509690f1ab6353415''', ) @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , 
unittest.TestCase ): '''simple docstring''' A_ : str = PegasusTokenizer A_ : Union[str, Any] = PegasusTokenizerFast A_ : Any = True A_ : str = True def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase, offset=0, mask_token_sent=_lowerCamelCase, mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], **_lowerCamelCase : Dict ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 10_00, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 40_96) assert batch.attention_mask.shape == (2, 40_96) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) __A = self._large_tokenizer(_lowerCamelCase ).input_ids self.assertListEqual( _lowerCamelCase, [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1], )
266
0
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2047,
        137_3653,
        2532_6001,
        32_1503_1751,
        2_1523_0289_8747,
        3_4747_4966_0383,
        341_5500_7172_8321,
        1,
        382_5123_0565_4641_3051,
        1,
        1,
        3186_6585_7834_0311_5116_7461,
        3_3170_4406_4679_8873_8596_1981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    plist = primes
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(83_8201)
    assert miller_rabin(83_8207)
    # 1_373_653
    assert not miller_rabin(1731_6001)
    assert miller_rabin(1731_6017)
    # 25_326_001
    assert not miller_rabin(30_7838_6641)
    assert miller_rabin(30_7838_6653)
    # 3_215_031_751
    assert not miller_rabin(1_7130_4557_4801)
    assert miller_rabin(1_7130_4557_4819)
    # 2_152_302_898_747
    assert not miller_rabin(2_7797_9972_8307)
    assert miller_rabin(2_7797_9972_8327)
    # 3_474_749_660_383
    assert not miller_rabin(113_8500_2390_9441)
    assert miller_rabin(113_8500_2390_9527)
    # 341_550_071_728_321
    assert not miller_rabin(127_5041_0188_4880_4351)
    assert miller_rabin(127_5041_0188_4880_4391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(796_6646_4458_5077_8779_1867)
    assert miller_rabin(796_6646_4458_5077_8779_1951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(5528_4067_7446_6478_9766_0333)
    assert miller_rabin(5528_4067_7446_6478_9766_0359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
6
"""simple docstring""" import re def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )] def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = split_input(str_ ) return "".join( [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" try: __A = split_input(__UpperCamelCase ) if upper: __A = ''''''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __A = ''''''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return to_simple_case(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" try: __A = to_simple_case(__UpperCamelCase ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''_''' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''-''' ) if __name__ == "__main__": __import__('doctest').testmod()
266
0
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_50, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_00, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_00, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : Optional[int] )-> Union[str, Any]: '''simple docstring''' if self.framework == "pytorch": subprocess.run( F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(),encoding='utf-8',check=lowercase_,) assert hasattr(self,'env' ) def snake_case__ ( self : Tuple,lowercase_ : Optional[int] )-> Dict: '''simple docstring''' A__ = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}' # distributed data settings A__ = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None # creates estimator return HuggingFace( entry_point=self.script,source_dir=self.env.test_path,role=self.env.role,image_uri=self.env.image_uri,base_job_name=lowercase_,instance_count=lowercase_,instance_type=self.instance_type,debugger_hook_config=lowercase_,hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path},metric_definitions=self.env.metric_definitions,distribution=lowercase_,py_version='py36',) def snake_case__ ( self : Optional[Any],lowercase_ : Tuple )-> Any: '''simple docstring''' TrainingJobAnalytics(lowercase_ ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(2,)] ) def snake_case__ ( self : Dict,lowercase_ : Optional[Any] )-> int: '''simple docstring''' A__ = self.create_estimator(lowercase_ ) # run training estimator.fit() # result dataframe A__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis A__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] ) A__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping A__ = ( Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds',9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy ) assert all(t <= self.results['eval_loss'] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'{estimator.latest_training_job.name}.json','w' ) as outfile: json.dump({'train_time': 
train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss},lowercase_ )
7
"""simple docstring""" from __future__ import annotations class snake_case : '''simple docstring''' def __init__( self : int, _lowerCamelCase : List[Any]=None ): '''simple docstring''' __A = data __A = None def __repr__( self : Union[str, Any] ): '''simple docstring''' __A = [] __A = self while temp: string_rep.append(f'{temp.data}' ) __A = temp.next return "->".join(_lowerCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not elements_list: raise Exception('''The Elements List is empty''' ) __A = __A = Node(elements_list[0] ) for i in range(1 , len(__UpperCamelCase ) ): __A = Node(elements_list[i] ) __A = current.next return head def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def lowerCAmelCase ( ): """simple docstring""" from doctest import testmod testmod() __A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print('''Linked List:''' ) print(__UpperCamelCase ) print('''Elements in Reverse:''' ) print_reverse(__UpperCamelCase ) if __name__ == "__main__": main()
266
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
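# Usage sketch added for illustration (not part of the original module): inside the
# transformers package the class above is the FNet configuration, so a config object is
# built with the keyword defaults shown in __init__.
config = FNetConfig(vocab_size=32_000, hidden_size=768, num_hidden_layers=12)
print(config.use_tpu_fourier_optimizations)  # False by default
print(config.tpu_short_seq_length)           # 512 by default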
8
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : int = ["input_features", "attention_mask"] def __init__( self : Optional[Any], _lowerCamelCase : Union[str, Any]=80, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Any=80, _lowerCamelCase : List[str]=0.0, _lowerCamelCase : int=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]=True, **_lowerCamelCase : List[str], ): '''simple docstring''' super().__init__(feature_size=_lowerCamelCase, sampling_rate=_lowerCamelCase, padding_value=_lowerCamelCase, **_lowerCamelCase ) __A = num_mel_bins __A = do_ceptral_normalize __A = normalize_means __A = normalize_vars __A = True def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : np.ndarray, ): '''simple docstring''' __A = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __A = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 ) __A = ta_kaldi.fbank(_lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray, _lowerCamelCase : int, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : float = 0.0, ): '''simple docstring''' # make sure we normalize float32 arrays if normalize_means: __A = x[:input_length].mean(axis=0 ) __A = np.subtract(_lowerCamelCase, _lowerCamelCase ) if normalize_vars: __A = x[:input_length].std(axis=0 ) __A = np.divide(_lowerCamelCase, _lowerCamelCase ) if input_length < x.shape[0]: __A = padding_value # make sure array is in float32 __A = x.astype(np.floataa ) return x def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[np.ndarray], _lowerCamelCase : Optional[np.ndarray] = None ): '''simple docstring''' __A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_lowerCamelCase, _lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value ) for x, n in zip(_lowerCamelCase, _lowerCamelCase ) ] def __call__( self : Optional[Any], _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _lowerCamelCase : Union[bool, str, PaddingStrategy] = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : bool = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[bool] = None, **_lowerCamelCase : Optional[Any], ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __A = isinstance(_lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __A = is_batched_numpy or ( isinstance(_lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCamelCase, np.ndarray ): __A = np.asarray(_lowerCamelCase, dtype=np.floataa ) elif isinstance(_lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __A = [raw_speech] # extract fbank features __A = [self._extract_fbank_features(_lowerCamelCase ) for waveform in raw_speech] # convert into correct format for padding __A = BatchFeature({'''input_features''': features} ) __A = self.pad( _lowerCamelCase, padding=_lowerCamelCase, max_length=_lowerCamelCase, truncation=_lowerCamelCase, pad_to_multiple_of=_lowerCamelCase, return_attention_mask=_lowerCamelCase, **_lowerCamelCase, ) # make sure list is in array format __A = padded_inputs.get('''input_features''' ) if isinstance(input_features[0], _lowerCamelCase ): __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for feature in input_features] __A = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: __A = [np.asarray(_lowerCamelCase, dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __A = ( np.array(_lowerCamelCase, dtype=np.intaa ) if self._get_padding_strategies(_lowerCamelCase, max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __A = self.normalize( padded_inputs['''input_features'''], attention_mask=_lowerCamelCase ) if return_tensors is not None: __A = padded_inputs.convert_to_tensors(_lowerCamelCase ) return padded_inputs
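# Speculative usage sketch, not part of the file above: this extractor reads like the
# Speech2Text feature extractor in transformers, so (assuming transformers and torchaudio
# are installed) it could be exercised roughly as follows. The class name and keyword
# arguments below are assumptions based on that reading, not taken from this file.
import numpy as np
from transformers import Speech2TextFeatureExtractor

extractor = Speech2TextFeatureExtractor(feature_size=80, sampling_rate=16_000, num_mel_bins=80)
waveform = np.random.randn(16_000).astype(np.float32)  # one second of audio at 16 kHz
inputs = extractor(waveform, sampling_rate=16_000, padding=True, return_tensors="np")
print(inputs["input_features"].shape)  # (1, num_frames, 80)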
266
0
def count_divisors(n):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
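# Added sanity check (not in the original file; names are the ones restored above):
# 28 = 1 + 2 + ... + 7 is the first triangular number with more than five divisors
# (1, 2, 4, 7, 14, 28), the worked example behind this highly-divisible-triangular-number
# problem.
assert count_divisors(28) == 6
assert count_divisors(76_576_500) == 576  # 2**2 * 3**2 * 5**3 * 7 * 11 * 13 * 17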
9
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=13, _lowerCamelCase : Any=3, _lowerCamelCase : Optional[int]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Any=[0.5, 0.5, 0.5], _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = do_normalize __A = image_mean __A = image_std def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = ViTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = EfficientFormerImageProcessorTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''image_std''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_proc_tester, 
equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), )
266
0
from ..utils import DummyObject, requires_backends class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["torch", "transformers", "onnx"] def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Dict) ->Dict: '''simple docstring''' requires_backends(self , ["torch", "transformers", "onnx"]) @classmethod def SCREAMING_SNAKE_CASE_ (cls : Optional[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str) ->Any: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"]) @classmethod def SCREAMING_SNAKE_CASE_ (cls : List[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]) ->Union[str, Any]: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"]) class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["torch", "transformers", "onnx"] def __init__(self : List[str] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]) ->Any: '''simple docstring''' requires_backends(self , ["torch", "transformers", "onnx"]) @classmethod def SCREAMING_SNAKE_CASE_ (cls : Tuple , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any) ->List[str]: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"]) @classmethod def SCREAMING_SNAKE_CASE_ (cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : int) ->Optional[Any]: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"]) class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["torch", "transformers", "onnx"] def __init__(self : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int) ->int: '''simple docstring''' requires_backends(self , ["torch", "transformers", "onnx"]) @classmethod def SCREAMING_SNAKE_CASE_ (cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any]) ->Union[str, Any]: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"]) @classmethod def SCREAMING_SNAKE_CASE_ (cls : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any]) ->str: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"]) class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["torch", "transformers", "onnx"] def __init__(self : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int]) ->Union[str, Any]: '''simple docstring''' requires_backends(self , ["torch", "transformers", "onnx"]) @classmethod def SCREAMING_SNAKE_CASE_ (cls : Optional[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[str]) ->int: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"]) @classmethod def SCREAMING_SNAKE_CASE_ (cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str) ->List[str]: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"]) class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["torch", "transformers", "onnx"] def __init__(self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int]) ->Dict: '''simple docstring''' requires_backends(self , ["torch", "transformers", "onnx"]) @classmethod def SCREAMING_SNAKE_CASE_ (cls : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int]) ->List[str]: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"]) 
@classmethod def SCREAMING_SNAKE_CASE_ (cls : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict) ->Union[str, Any]: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"]) class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["torch", "transformers", "onnx"] def __init__(self : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple) ->Dict: '''simple docstring''' requires_backends(self , ["torch", "transformers", "onnx"]) @classmethod def SCREAMING_SNAKE_CASE_ (cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple) ->Union[str, Any]: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"]) @classmethod def SCREAMING_SNAKE_CASE_ (cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int) ->Optional[int]: '''simple docstring''' requires_backends(cls , ["torch", "transformers", "onnx"])
10
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int], *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : Dict ): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''', _lowerCamelCase, ) super().__init__(*_lowerCamelCase, **_lowerCamelCase )
266
0
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
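# Added illustrative check (not in the original file): for a fixed number of trials the
# probabilities over all possible success counts should sum to 1.
total = sum(binomial_distribution(k, 10, 0.3) for k in range(11))
assert abs(total - 1.0) < 1e-9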
11
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : int=3, _lowerCamelCase : Optional[Any]=18, _lowerCamelCase : Any=30, _lowerCamelCase : str=4_00, _lowerCamelCase : int=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=True, ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = apply_ocr def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = LayoutLMvaImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''apply_ocr''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} ) __A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) self.assertIsInstance(encoding.words, _lowerCamelCase ) self.assertIsInstance(encoding.boxes, _lowerCamelCase ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def 
_SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' # with apply_OCR = True __A = LayoutLMvaImageProcessor() from datasets import load_dataset __A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' ) __A = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ), len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', 
'''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 
4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, _lowerCamelCase ) self.assertListEqual(encoding.boxes, _lowerCamelCase ) # with apply_OCR = False __A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
266
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Optional[int] = ['pixel_values'] def __init__( self: List[Any] , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Dict[str, int]] = None , UpperCamelCase_: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_: bool = True , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , **UpperCamelCase_: List[str] , ): super().__init__(**UpperCamelCase_ ) __lowerCamelCase = size if size is not None else {"""height""": 2_24, """width""": 2_24} __lowerCamelCase = get_size_dict(UpperCamelCase_ ) __lowerCamelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} __lowerCamelCase = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ , param_name="""crop_size""" ) __lowerCamelCase = do_resize __lowerCamelCase = do_rescale __lowerCamelCase = do_normalize __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = rescale_factor __lowerCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __lowerCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Optional[int] , ): __lowerCamelCase = get_size_dict(UpperCamelCase_ ) if "shortest_edge" in size: __lowerCamelCase = get_resize_output_image_size(UpperCamelCase_ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase_ ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: __lowerCamelCase = (size["""height"""], size["""width"""]) else: raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' ) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Dict , ): __lowerCamelCase = get_size_dict(UpperCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(UpperCamelCase_ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: List[Any] ): return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: List[Any] , ): return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = None , UpperCamelCase_: bool = None , UpperCamelCase_: int = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: Tuple , ): __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(UpperCamelCase_ , param_name="""crop_size""" , default_to_square=UpperCamelCase_ ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(UpperCamelCase_ ) if not is_batched(UpperCamelCase_ ): __lowerCamelCase = [images] if not valid_images(UpperCamelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] __lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __lowerCamelCase = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
12
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class snake_case ( ctypes.Structure ): '''simple docstring''' A_ : List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = False ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25l''' ) sys.stdout.flush() def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = True ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25h''' ) sys.stdout.flush() @contextmanager def lowerCAmelCase ( ): """simple docstring""" try: hide_cursor() yield finally: show_cursor()
266
0
import itertools import string from collections.abc import Generator, Iterable def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = iter(_UpperCAmelCase ) while True: SCREAMING_SNAKE_CASE_: Dict = tuple(itertools.islice(_UpperCAmelCase , _UpperCAmelCase ) ) if not chunk: return yield chunk def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = "".join([c.upper() for c in dirty if c in string.ascii_letters] ) SCREAMING_SNAKE_CASE_: Any = "" if len(_UpperCAmelCase ) < 2: return dirty for i in range(len(_UpperCAmelCase ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(_UpperCAmelCase ) & 1: clean += "X" return clean def A_ ( _UpperCAmelCase ): # I and J are used interchangeably to allow # us to use a 5x5 table (25 letters) SCREAMING_SNAKE_CASE_: Tuple = "ABCDEFGHIKLMNOPQRSTUVWXYZ" # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler SCREAMING_SNAKE_CASE_: List[str] = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(_UpperCAmelCase ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(_UpperCAmelCase ) return table def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = generate_table(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[Any] = prepare_input(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Dict = "" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_UpperCAmelCase , 2 ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = divmod(table.index(_UpperCAmelCase ) , 5 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = divmod(table.index(_UpperCAmelCase ) , 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = generate_table(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: int = "" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_UpperCAmelCase , 2 ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = divmod(table.index(_UpperCAmelCase ) , 5 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = divmod(table.index(_UpperCAmelCase ) , 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
13
"""simple docstring""" import argparse import struct import unittest class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : bytes ): '''simple docstring''' __A = data # Initialize hash values __A = [ 0X6a_09e_667, 0Xbb_67a_e85, 0X3c_6ef_372, 0Xa5_4ff_53a, 0X51_0e5_27f, 0X9b_056_88c, 0X1f_83d_9ab, 0X5b_e0c_d19, ] # Initialize round constants __A = [ 0X42_8a2_f98, 0X71_374_491, 0Xb5_c0f_bcf, 0Xe9_b5d_ba5, 0X39_56c_25b, 0X59_f11_1f1, 0X92_3f8_2a4, 0Xab_1c5_ed5, 0Xd8_07a_a98, 0X12_835_b01, 0X24_318_5be, 0X55_0c7_dc3, 0X72_be5_d74, 0X80_deb_1fe, 0X9b_dc0_6a7, 0Xc1_9bf_174, 0Xe4_9b6_9c1, 0Xef_be4_786, 0X0f_c19_dc6, 0X24_0ca_1cc, 0X2d_e92_c6f, 0X4a_748_4aa, 0X5c_b0a_9dc, 0X76_f98_8da, 0X98_3e5_152, 0Xa8_31c_66d, 0Xb0_032_7c8, 0Xbf_597_fc7, 0Xc6_e00_bf3, 0Xd5_a79_147, 0X06_ca6_351, 0X14_292_967, 0X27_b70_a85, 0X2e_1b2_138, 0X4d_2c6_dfc, 0X53_380_d13, 0X65_0a7_354, 0X76_6a0_abb, 0X81_c2c_92e, 0X92_722_c85, 0Xa2_bfe_8a1, 0Xa8_1a6_64b, 0Xc2_4b8_b70, 0Xc7_6c5_1a3, 0Xd1_92e_819, 0Xd6_990_624, 0Xf4_0e3_585, 0X10_6aa_070, 0X19_a4c_116, 0X1e_376_c08, 0X27_487_74c, 0X34_b0b_cb5, 0X39_1c0_cb3, 0X4e_d8a_a4a, 0X5b_9cc_a4f, 0X68_2e6_ff3, 0X74_8f8_2ee, 0X78_a56_36f, 0X84_c87_814, 0X8c_c70_208, 0X90_bef_ffa, 0Xa4_506_ceb, 0Xbe_f9a_3f7, 0Xc6_717_8f2, ] __A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : bytes ): '''simple docstring''' __A = b'''\x80''' + (b'''\x00''' * (63 - (len(_lowerCamelCase ) + 8) % 64)) __A = struct.pack('''>Q''', (len(_lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' # Convert into blocks of 64 bytes __A = [ self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data ), 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __A = list(struct.unpack('''>16L''', _lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __A , __A , __A , __A , __A , __A , __A , __A = self.hashes for index in range(0, 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __A = ( self.ror(words[index - 15], 7 ) ^ self.ror(words[index - 15], 18 ) ^ (words[index - 15] >> 3) ) __A = ( self.ror(words[index - 2], 17 ) ^ self.ror(words[index - 2], 19 ) ^ (words[index - 2] >> 10) ) __A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression __A = self.ror(_lowerCamelCase, 6 ) ^ self.ror(_lowerCamelCase, 11 ) ^ self.ror(_lowerCamelCase, 25 ) __A = (e & f) ^ ((~e & 0Xff_fff_fff) & g) __A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 __A = self.ror(_lowerCamelCase, 2 ) ^ self.ror(_lowerCamelCase, 13 ) ^ self.ror(_lowerCamelCase, 22 ) __A = (a & b) ^ (a & c) ^ (b & c) __A = (sa + maj) % 0X100_000_000 __A , __A , __A , __A , __A , __A , __A , __A = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) __A = [a, b, c, d, e, f, g, h] # Modify final values __A = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes ) ] __A = ''''''.join([hex(_lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' return 0Xff_fff_fff & (value << (32 - rotations)) | (value >> rotations) class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' import 
hashlib __A = bytes('''Test String''', '''utf-8''' ) self.assertEqual(SHAaaa(_lowerCamelCase ).hash, hashlib.shaaaa(_lowerCamelCase ).hexdigest() ) def lowerCAmelCase ( ): """simple docstring""" import doctest doctest.testmod() __A = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) __A = parser.parse_args() __A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: __A = f.read() else: __A = bytes(__UpperCamelCase , '''utf-8''' ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
266
0
from __future__ import annotations import pandas as pd def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> list[int]: """simple docstring""" A__ = [0] * no_of_processes A__ = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(lowercase_ ): A__ = burst_time[i] A__ = 0 A__ = 0 A__ = 999_999_999 A__ = 0 A__ = False # Process until all processes are completed while complete != no_of_processes: for j in range(lowercase_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: A__ = remaining_time[j] A__ = j A__ = True if not check: increment_time += 1 continue remaining_time[short] -= 1 A__ = remaining_time[short] if minm == 0: A__ = 999_999_999 if remaining_time[short] == 0: complete += 1 A__ = False # Find finish time of current process A__ = increment_time + 1 # Calculate waiting time A__ = finish_time - arrival_time[short] A__ = finar - burst_time[short] if waiting_time[short] < 0: A__ = 0 # Increment time increment_time += 1 return waiting_time def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> list[int]: """simple docstring""" A__ = [0] * no_of_processes for i in range(lowercase_ ): A__ = burst_time[i] + waiting_time[i] return turn_around_time def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> None: """simple docstring""" A__ = 0 A__ = 0 for i in range(lowercase_ ): A__ = total_waiting_time + waiting_time[i] A__ = total_turn_around_time + turn_around_time[i] print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" ) print('''Average turn around time =''' , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print("""Enter how many process you want to analyze""") _lowerCamelCase : str = int(input()) _lowerCamelCase : Optional[int] = [0] * no_of_processes _lowerCamelCase : str = [0] * no_of_processes _lowerCamelCase : Tuple = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print("""Enter the arrival time and burst time for process:--""" + str(i + 1)) _lowerCamelCase , _lowerCamelCase : Union[str, Any] = map(int, input().split()) _lowerCamelCase : List[str] = calculate_waitingtime(arrival_time, burst_time, no_of_processes) _lowerCamelCase : Any = burst_time _lowerCamelCase : Optional[Any] = no_of_processes _lowerCamelCase : Any = waiting_time _lowerCamelCase : Optional[int] = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) _lowerCamelCase : Optional[Any] = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ """Process""", """BurstTime""", """ArrivalTime""", """WaitingTime""", """TurnAroundTime""", ], ) # Printing the dataFrame pd.set_option("""display.max_rows""", fcfs.shape[0] + 1) print(fcfs)
14
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets lowercase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' lowercase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' lowercase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ], ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str="auto", _lowerCamelCase : Union[str, Any]=-1, _lowerCamelCase : List[str]=0.9, _lowerCamelCase : int=5, _lowerCamelCase : Tuple=5_00, _lowerCamelCase : Union[str, Any]="gpt2-large", _lowerCamelCase : int=-1, _lowerCamelCase : Union[str, Any]=10_24, _lowerCamelCase : Union[str, Any]=25, _lowerCamelCase : str=5, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=25, ): '''simple docstring''' __A = compute_mauve( p_text=_lowerCamelCase, q_text=_lowerCamelCase, p_features=_lowerCamelCase, q_features=_lowerCamelCase, p_tokens=_lowerCamelCase, q_tokens=_lowerCamelCase, num_buckets=_lowerCamelCase, pca_max_data=_lowerCamelCase, kmeans_explained_var=_lowerCamelCase, kmeans_num_redo=_lowerCamelCase, kmeans_max_iter=_lowerCamelCase, featurize_model_name=_lowerCamelCase, device_id=_lowerCamelCase, max_text_length=_lowerCamelCase, divergence_curve_discretization_size=_lowerCamelCase, mauve_scaling_factor=_lowerCamelCase, verbose=_lowerCamelCase, seed=_lowerCamelCase, ) return out
266
0
import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Union[str, Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', } SCREAMING_SNAKE_CASE :List[str] = { 'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'}, 'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'}, } SCREAMING_SNAKE_CASE :List[Any] = { 'ctrl': 256, } SCREAMING_SNAKE_CASE :Optional[Any] = { 'Pregnancy': 16_8629, 'Christianity': 7675, 'Explain': 10_6423, 'Fitness': 6_3440, 'Saving': 6_3163, 'Ask': 2_7171, 'Ass': 9_5985, 'Joke': 16_3509, 'Questions': 4_5622, 'Thoughts': 4_9605, 'Retail': 5_2342, 'Feminism': 16_4338, 'Writing': 1_1992, 'Atheism': 19_2263, 'Netflix': 4_8616, 'Computing': 3_9639, 'Opinion': 4_3213, 'Alone': 4_4967, 'Funny': 5_8917, 'Gaming': 4_0358, 'Human': 4088, 'India': 1331, 'Joker': 7_7138, 'Diet': 3_6206, 'Legal': 1_1859, 'Norman': 4939, 'Tip': 7_2689, 'Weight': 5_2343, 'Movies': 4_6273, 'Running': 2_3425, 'Science': 2090, 'Horror': 3_7793, 'Confession': 6_0572, 'Finance': 1_2250, 'Politics': 1_6360, 'Scary': 19_1985, 'Support': 1_2654, 'Technologies': 3_2516, 'Teenage': 6_6160, 'Event': 3_2769, 'Learned': 6_7460, 'Notion': 18_2770, 'Wikipedia': 3_7583, 'Books': 6665, 'Extract': 7_6050, 'Confessions': 10_2701, 'Conspiracy': 7_5932, 'Links': 6_3674, 'Narcissus': 15_0425, 'Relationship': 5_4766, 'Relationships': 13_4796, 'Reviews': 4_1671, 'News': 4256, 'Translation': 2_6820, 'multilingual': 12_8406, } def UpperCAmelCase ( a_ ) -> Union[str, Any]: """simple docstring""" __A = set() __A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __A = char __A = set(a_ ) return pairs class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = CONTROL_CODES def __init__( self : Dict ,A : List[Any] ,A : Tuple ,A : Optional[int]="<unk>" ,**A : str ): super().__init__(unk_token=A ,**A ) with open(A ,encoding="utf-8" ) as vocab_handle: __A = json.load(A ) __A = {v: k for k, v in self.encoder.items()} with open(A ,encoding="utf-8" ) as merges_handle: __A = merges_handle.read().split("\n" )[1:-1] __A = [tuple(merge.split() ) for merge in merges] __A = dict(zip(A ,range(len(A ) ) ) ) __A = {} @property def UpperCamelCase_ ( self : int ): return len(self.encoder ) def UpperCamelCase_ ( self : Optional[Any] ): return dict(self.encoder ,**self.added_tokens_encoder ) def UpperCamelCase_ ( self : Any ,A : Optional[Any] ): if token in self.cache: return self.cache[token] __A = tuple(A ) __A = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) __A = get_pairs(A ) if not pairs: return token while True: __A = min(A ,key=lambda A : self.bpe_ranks.get(A ,float("inf" ) ) ) if bigram not in self.bpe_ranks: break __A , __A = bigram __A = [] __A = 0 while i < len(A ): try: __A = word.index(A ,A ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __A = j if word[i] == first and i < len(A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __A = tuple(A ) __A = new_word if len(A ) == 1: break else: __A = get_pairs(A ) __A = "@@ ".join(A ) __A = word[:-4] __A = word return word def UpperCamelCase_ ( self : 
Tuple ,A : Optional[Any] ): __A = [] __A = re.findall(R"\S+\n?" ,A ) for token in words: split_tokens.extend(list(self.bpe(A ).split(" " ) ) ) return split_tokens def UpperCamelCase_ ( self : int ,A : Optional[Any] ): return self.encoder.get(A ,self.encoder.get(self.unk_token ) ) def UpperCamelCase_ ( self : List[str] ,A : Dict ): return self.decoder.get(A ,self.unk_token ) def UpperCamelCase_ ( self : Optional[int] ,A : Union[str, Any] ): __A = " ".join(A ).replace("@@ " ,"" ).strip() return out_string def UpperCamelCase_ ( self : Dict ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(A ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=A ,ensure_ascii=A ) + "\n" ) __A = 0 with open(A ,"w" ,encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda A : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) __A = token_index writer.write(" ".join(A ) + "\n" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
15
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowercase_ = imread(R'digital_image_processing/image_data/lena_small.jpg') lowercase_ = cvtColor(img, COLOR_BGR2GRAY) def lowerCAmelCase ( ): """simple docstring""" __A = cn.convert_to_negative(__UpperCamelCase ) # assert negative_img array for at least one True assert negative_img.any() def lowerCAmelCase ( ): """simple docstring""" with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(__UpperCamelCase , 1_1_0 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def lowerCAmelCase ( ): """simple docstring""" __A = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCAmelCase ( ): """simple docstring""" __A = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() __A = canny.canny(__UpperCamelCase ) # assert canny array for at least one True assert canny_array.any() def lowerCAmelCase ( ): """simple docstring""" assert gg.gaussian_filter(__UpperCamelCase , 5 , sigma=0.9 ).all() def lowerCAmelCase ( ): """simple docstring""" __A = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __A = conv.img_convolve(__UpperCamelCase , __UpperCamelCase ).astype(__UpperCamelCase ) assert res.any() def lowerCAmelCase ( ): """simple docstring""" assert med.median_filter(__UpperCamelCase , 3 ).any() def lowerCAmelCase ( ): """simple docstring""" __A , __A = sob.sobel_filter(__UpperCamelCase ) assert grad.any() and theta.any() def lowerCAmelCase ( ): """simple docstring""" __A = sp.make_sepia(__UpperCamelCase , 2_0 ) assert sepia.all() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ): """simple docstring""" __A = bs.Burkes(imread(__UpperCamelCase , 1 ) , 1_2_0 ) burkes.process() assert burkes.output_img.any() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ): """simple docstring""" __A = rs.NearestNeighbour(imread(__UpperCamelCase , 1 ) , 4_0_0 , 2_0_0 ) nn.process() assert nn.output.any() def lowerCAmelCase ( ): """simple docstring""" __A = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. 
__A = imread(__UpperCamelCase , 0 ) # Test for get_neighbors_pixel function() return not None __A = 0 __A = 0 __A = image[x_coordinate][y_coordinate] __A = lbp.get_neighbors_pixel( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __A = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __A = lbp.local_binary_value(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert lbp_image.any()
266
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowercase_ = random.Random() if is_torch_available(): import torch def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None ): """simple docstring""" if rng is None: __A = global_rng __A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any, _lowerCamelCase : List[str], _lowerCamelCase : Any=7, _lowerCamelCase : Optional[int]=4_00, _lowerCamelCase : Optional[int]=20_00, _lowerCamelCase : Dict=1, _lowerCamelCase : Optional[Any]=0.0, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Dict=True, ): '''simple docstring''' __A = parent __A = batch_size __A = min_seq_length __A = max_seq_length __A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __A = feature_size __A = padding_value __A = sampling_rate __A = return_attention_mask __A = do_normalize def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[Any]=False, _lowerCamelCase : int=False ): '''simple docstring''' def _flatten(_lowerCamelCase : List[str] ): return list(itertools.chain(*_lowerCamelCase ) ) if equal_length: __A = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __A = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: __A = [np.asarray(_lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = ASTFeatureExtractor def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ASTFeatureExtractionTester(self ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' # Tests that all call wrap to encode_plus and batch_encode_plus __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __A = [floats_list((1, x) )[0] for x in range(8_00, 14_00, 2_00 )] __A = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __A = feat_extract(speech_inputs[0], return_tensors='''np''' ).input_values __A = feat_extract(np_speech_inputs[0], return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test batched __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test 
2-D numpy arrays are batched. __A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __A = np.asarray(_lowerCamelCase ) __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' import torch __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __A = np.random.rand(1_00 ).astype(np.floataa ) __A = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' from datasets import load_dataset __A = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech __A = ds.sort('''id''' ).select(range(_lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # fmt: off __A = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on __A = self._load_datasamples(1 ) __A = ASTFeatureExtractor() __A = feature_extractor(_lowerCamelCase, return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape, (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30], _lowerCamelCase, atol=1e-4 ) )
266
0
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class _lowerCAmelCase : """simple docstring""" def __init__( self : List[Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[Any]=1_3, UpperCAmelCase__ : Union[str, Any]=7, UpperCAmelCase__ : Tuple=True, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : Optional[int]=False, UpperCAmelCase__ : Union[str, Any]=True, UpperCAmelCase__ : Tuple=9_9, UpperCAmelCase__ : Optional[Any]=6_4, UpperCAmelCase__ : str=5, UpperCAmelCase__ : Optional[Any]=4, UpperCAmelCase__ : List[str]=6_4, UpperCAmelCase__ : Any="gelu", UpperCAmelCase__ : int=0.1, UpperCAmelCase__ : Tuple=0.1, UpperCAmelCase__ : Optional[Any]=5_1_2, UpperCAmelCase__ : Union[str, Any]=1_6, UpperCAmelCase__ : List[Any]=2, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : str=3, UpperCAmelCase__ : List[str]=4, UpperCAmelCase__ : Dict=None, ): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope def _lowercase ( self : List[str] ): return MPNetConfig.from_pretrained("microsoft/mpnet-base" ) def _lowercase ( self : Union[str, Any] ): __lowercase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) __lowercase = ids_tensor([self.batch_size], self.num_choices ) __lowercase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : Dict ): return MPNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def _lowercase ( self : Optional[int], UpperCAmelCase__ : str, UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Optional[Any] ): __lowercase = MPNetModel(config=UpperCAmelCase__ ) 
model.to(UpperCAmelCase__ ) model.eval() __lowercase = model(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : str, UpperCAmelCase__ : Any, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : int, UpperCAmelCase__ : str ): __lowercase = MPNetForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model( UpperCAmelCase__, attention_mask=UpperCAmelCase__, start_positions=UpperCAmelCase__, end_positions=UpperCAmelCase__, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[str] ): __lowercase = self.num_labels __lowercase = MPNetForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def _lowercase ( self : int, UpperCAmelCase__ : int, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Any, UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[str] ): __lowercase = self.num_choices __lowercase = MPNetForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() __lowercase = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() __lowercase = model( UpperCAmelCase__, attention_mask=UpperCAmelCase__, labels=UpperCAmelCase__, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def _lowercase ( self : Optional[int], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any] ): __lowercase = self.num_labels __lowercase = MPNetForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : int ): __lowercase = self.prepare_config_and_inputs() ((__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase)) = config_and_inputs __lowercase = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) __UpperCAmelCase : Optional[int] = ( { "feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, "token-classification": 
MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) __UpperCAmelCase : Any = False __UpperCAmelCase : List[Any] = True def _lowercase ( self : int ): __lowercase = MPNetModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, hidden_size=3_7 ) def _lowercase ( self : Dict ): self.config_tester.run_common_tests() def _lowercase ( self : Union[str, Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*UpperCAmelCase__ ) def _lowercase ( self : Optional[Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*UpperCAmelCase__ ) def _lowercase ( self : Optional[int] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*UpperCAmelCase__ ) def _lowercase ( self : Optional[int] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*UpperCAmelCase__ ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Dict ): __lowercase = MPNetModel.from_pretrained("microsoft/mpnet-base" ) __lowercase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) __lowercase = model(UpperCAmelCase__ )[0] __lowercase = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape, UpperCAmelCase__ ) __lowercase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3], UpperCAmelCase__, atol=1E-4 ) )
17
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = current_set.copy() for row_index, row in enumerate(__UpperCamelCase ): __A = row[0] for column_index, column in enumerate(__UpperCamelCase ): if magnitude == 0: __A = column continue __A = column / magnitude # Subtract to cancel term __A = current_set[0] __A = [first_row] __A = current_set[1::] for row in current_set: __A = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__UpperCamelCase ) continue for column_index in range(len(__UpperCamelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__UpperCamelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: __A = final_set[0] __A = [] __A = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) __A = simplify(__UpperCamelCase ) for i in range(len(__UpperCamelCase ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , __UpperCamelCase ) __A = resultant return final_set def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if len(__UpperCamelCase ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) __A = len(__UpperCamelCase ) + 1 if any(len(__UpperCamelCase ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(__UpperCamelCase , (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(__UpperCamelCase ) == 1: return [equations[0][-1] / equations[0][0]] __A = equations.copy() if any(0 in row for row in data_set ): __A = data_set.copy() __A = [] for row_index, row in enumerate(__UpperCamelCase ): if 0 not in row: __A = data_set.pop(__UpperCamelCase ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0 , __UpperCamelCase ) __A = data_set.copy() __A = simplify(__UpperCamelCase ) __A = simplified[::-1] __A = [] for row in simplified: __A = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue __A = row.copy()[: len(__UpperCamelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__UpperCamelCase ) == 0: solutions.append(0 ) continue __A = temp_row[1::] __A = temp_row[::-1] for column_index, column in enumerate(__UpperCamelCase ): current_solution -= column * solutions[column_index] solutions.append(__UpperCamelCase ) __A = [] for item in solutions: final.append(float(round(__UpperCamelCase , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowercase_ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
266
0
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    # `task` identifies the template; the schemas describe the expected columns
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so bypass __setattr__ via __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
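A hedged usage sketch for the task template above. It assumes a `datasets` version that still exposes the task-template API, and the class and column names mirror the listing:

from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

# Features for a toy two-class image dataset.
features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})

task = ImageClassification(image_column="image", label_column="labels")
aligned = task.align_with_features(features)

print(aligned.label_schema)  # label schema now carries the concrete ClassLabel
print(task.column_mapping)   # {'image': 'image', 'labels': 'labels'}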
18
"""simple docstring""" from __future__ import annotations from typing import Any def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not postfix_notation: return 0 __A = {'''+''', '''-''', '''*''', '''/'''} __A = [] for token in postfix_notation: if token in operations: __A , __A = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(__UpperCamelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
266
0
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class _SCREAMING_SNAKE_CASE : def __init__( self , lowercase , lowercase=13 , lowercase=32 , lowercase=2 , lowercase=3 , lowercase=16 , lowercase=[1, 2, 1] , lowercase=[2, 2, 4] , lowercase=2 , lowercase=2.0 , lowercase=True , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase="gelu" , lowercase=False , lowercase=True , lowercase=0.0_2 , lowercase=1e-5 , lowercase=True , lowercase=None , lowercase=True , lowercase=10 , lowercase=8 , lowercase=["stage1", "stage2", "stage3"] , lowercase=[1, 2, 3] , ) -> List[str]: lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = embed_dim lowerCamelCase_ = depths lowerCamelCase_ = num_heads lowerCamelCase_ = window_size lowerCamelCase_ = mlp_ratio lowerCamelCase_ = qkv_bias lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = drop_path_rate lowerCamelCase_ = hidden_act lowerCamelCase_ = use_absolute_embeddings lowerCamelCase_ = patch_norm lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = initializer_range lowerCamelCase_ = is_training lowerCamelCase_ = scope lowerCamelCase_ = use_labels lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = encoder_stride lowerCamelCase_ = out_features lowerCamelCase_ = out_indices def SCREAMING_SNAKE_CASE_( self ) -> int: lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> List[Any]: lowerCamelCase_ = MaskFormerSwinModel(config=lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model(lowercase ) lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> Union[str, Any]: lowerCamelCase_ = MaskFormerSwinBackbone(config=lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model(lowercase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(lowercase ): lowerCamelCase_ = ["stem"] lowerCamelCase_ = MaskFormerSwinBackbone(config=lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ): lowerCAmelCase__ = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCAmelCase__ = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def SCREAMING_SNAKE_CASE_( self ) -> Tuple: lowerCamelCase_ = MaskFormerSwinModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=lowercase , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: pass def SCREAMING_SNAKE_CASE_( self ) -> List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE_( self ) -> Tuple: return def SCREAMING_SNAKE_CASE_( self ) -> str: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> int: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowercase ) @unittest.skip("Swin does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE_( self ) -> List[str]: pass @unittest.skip("Swin does not support feedforward chunking" ) def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: pass def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) ) def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = 
model_class(lowercase ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def SCREAMING_SNAKE_CASE_( self ) -> int: pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: pass def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: lowerCamelCase_ = model_class(lowercase ) model.to(lowercase ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(**self._prepare_for_class(lowercase , lowercase ) ) lowerCamelCase_ = outputs.hidden_states lowerCamelCase_ = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowercase ) , lowercase ) # Swin has a different seq_length lowerCamelCase_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def SCREAMING_SNAKE_CASE_( self ) -> Dict: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCamelCase_ = True self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True self.check_hidden_states_output(lowercase , lowercase , lowercase , lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> str: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = 3 lowerCamelCase_ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase_ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCamelCase_ = True self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True self.check_hidden_states_output(lowercase , lowercase , lowercase , (padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def SCREAMING_SNAKE_CASE_( self ) -> Dict: pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def SCREAMING_SNAKE_CASE_( self ) -> Dict: pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def SCREAMING_SNAKE_CASE_( self ) -> List[str]: pass def 
SCREAMING_SNAKE_CASE_( self ) -> List[Any]: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(lowercase ): lowerCamelCase_ = 0 return t def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ): with torch.no_grad(): lowerCamelCase_ = model(**lowercase , return_dict=lowercase , **lowercase ) lowerCamelCase_ = model(**lowercase , return_dict=lowercase , **lowercase ).to_tuple() def recursive_check(lowercase , lowercase ): if isinstance(lowercase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ): recursive_check(lowercase , lowercase ) elif isinstance(lowercase , lowercase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(lowercase , lowercase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(lowercase ) , set_nan_tensor_to_zero(lowercase ) , atol=1e-5 ) , msg=( "Tuple and dict output are not equal. Difference:" f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:' f' {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}. Dict has' f' `nan`: {torch.isnan(lowercase ).any()} and `inf`: {torch.isinf(lowercase )}.' ) , ) recursive_check(lowercase , lowercase ) for model_class in self.all_model_classes: lowerCamelCase_ = model_class(lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase ) lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase ) lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase ) lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase ) lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase , {"output_hidden_states": True} ) lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) lowerCamelCase_ = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase , {"output_hidden_states": True} ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase , snake_case_ ): lowerCAmelCase__ = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCAmelCase__ = MaskFormerSwinConfig def SCREAMING_SNAKE_CASE_( self ) -> Any: lowerCamelCase_ = MaskFormerSwinModelTester(self ) def SCREAMING_SNAKE_CASE_( self ) -> List[str]: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: lowerCamelCase_ = backbone_class(lowercase ) backbone.to(lowercase ) backbone.eval() lowerCamelCase_ = backbone(**lowercase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , lowercase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True lowerCamelCase_ = backbone(**lowercase , 
output_hidden_states=lowercase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: lowerCamelCase_ = backbone(**lowercase , output_attentions=lowercase ) self.assertIsNotNone(outputs.attentions )
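For orientation, here is a minimal sketch of the backbone usage these tests exercise. It assumes `MaskFormerSwinConfig` and `MaskFormerSwinBackbone` are importable from the top-level `transformers` package (the exact export location may vary by version); the input shape is illustrative only.

import torch
from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
backbone = MaskFormerSwinBackbone(config)
backbone.eval()

with torch.no_grad():
    outputs = backbone(pixel_values=torch.randn(1, 3, 224, 224))

# One feature map per requested stage; channel counts line up with backbone.channels,
# which is what the assertions in the test above check.
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
    print(tuple(feature_map.shape), n_channels)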
19
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple, _lowerCamelCase : List[str]=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : int=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : int=99, _lowerCamelCase : Optional[int]=32, _lowerCamelCase : Tuple=5, _lowerCamelCase : Tuple=4, _lowerCamelCase : str=37, _lowerCamelCase : Union[str, Any]="gelu", _lowerCamelCase : int=0.1, _lowerCamelCase : List[Any]=0.1, _lowerCamelCase : Dict=5_12, _lowerCamelCase : List[Any]=16, _lowerCamelCase : Any=2, _lowerCamelCase : Any=0.02, _lowerCamelCase : Dict=4, ): '''simple docstring''' __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_attention_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_choices def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __A = None if self.use_attention_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) __A = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self.prepare_config_and_inputs() __A , __A , __A , __A = config_and_inputs __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Dict = True A_ : Tuple = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = FlaxRoFormerModelTester(self ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: 
__A = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''', from_pt=_lowerCamelCase ) __A = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCamelCase ) @require_flax class snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __A = jnp.array([[0, 1, 2, 3, 4, 5]] ) __A = model(_lowerCamelCase )[0] __A = 5_00_00 __A = (1, 6, vocab_size) self.assertEqual(output.shape, _lowerCamelCase ) __A = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
266
0
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 lowercase : List[Any] = sys.version_info >= (3, 10) def _snake_case( SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> Tuple: return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ ) @dataclass class __snake_case : _a : int _a : float _a : str _a : bool @dataclass class __snake_case : _a : int= 42 _a : str= field(default="toto" , metadata={"help": "help message"} ) @dataclass class __snake_case : _a : bool= False _a : bool= True _a : Optional[bool]= None class __snake_case ( lowerCAmelCase ): _a : Optional[Any]= "titi" _a : Dict= "toto" class __snake_case ( lowerCAmelCase ): _a : Optional[Any]= "titi" _a : str= "toto" _a : Tuple= 42 @dataclass class __snake_case : _a : BasicEnum= "toto" def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[Any] = BasicEnum(self.foo ) @dataclass class __snake_case : _a : MixedTypeEnum= "toto" def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[Any] = MixedTypeEnum(self.foo ) @dataclass class __snake_case : _a : Optional[int]= None _a : Optional[float]= field(default=lowerCAmelCase , metadata={"help": "help message"} ) _a : Optional[str]= None _a : Optional[List[str]]= list_field(default=[] ) _a : Optional[List[int]]= list_field(default=[] ) @dataclass class __snake_case : _a : List[int]= list_field(default=[] ) _a : List[int]= list_field(default=[1, 2, 3] ) _a : List[str]= list_field(default=["Hallo", "Bonjour", "Hello"] ) _a : List[float]= list_field(default=[0.1, 0.2, 0.3] ) @dataclass class __snake_case : _a : List[int]= field() _a : str= field() _a : BasicEnum= field() def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[Any] = BasicEnum(self.required_enum ) @dataclass class __snake_case : _a : int _a : "BasicEnum"= field() _a : "Optional[bool]"= None _a : "str"= field(default="toto" , metadata={"help": "help message"} ) _a : "List[str]"= list_field(default=["Hallo", "Bonjour", "Hello"] ) if is_python_no_less_than_3_10: @dataclass class __snake_case : _a : bool= False _a : bool= True _a : bool | None= None @dataclass class __snake_case : _a : int | None= None _a : float | None= field(default=lowerCAmelCase , metadata={"help": "help message"} ) _a : str | None= None _a : list[str] | None= list_field(default=[] ) _a : list[int] | None= list_field(default=[] ) class __snake_case ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ): '''simple docstring''' self.assertEqual(len(a._actions ) ,len(b._actions ) ) for x, y in zip(a._actions ,b._actions ): lowercase : Any = {k: v for k, v in vars(snake_case ).items() if k != """container"""} lowercase : str = {k: v for k, v in vars(snake_case ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" ,snake_case ) and yy.get("""choices""" ,snake_case ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](snake_case ) 
,yy["""type"""](snake_case ) ) del xx["type"], yy["type"] self.assertEqual(snake_case ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Tuple = HfArgumentParser(snake_case ) lowercase : str = argparse.ArgumentParser() expected.add_argument("""--foo""" ,type=snake_case ,required=snake_case ) expected.add_argument("""--bar""" ,type=snake_case ,required=snake_case ) expected.add_argument("""--baz""" ,type=snake_case ,required=snake_case ) expected.add_argument("""--flag""" ,type=snake_case ,default=snake_case ,const=snake_case ,nargs="""?""" ) self.argparsersEqual(snake_case ,snake_case ) lowercase : List[Any] = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((lowercase) , ) : Union[str, Any] = parser.parse_args_into_dataclasses(snake_case ,look_for_args_file=snake_case ) self.assertFalse(example.flag ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = HfArgumentParser(snake_case ) lowercase : int = argparse.ArgumentParser() expected.add_argument("""--foo""" ,default=42 ,type=snake_case ) expected.add_argument("""--baz""" ,default="""toto""" ,type=snake_case ,help="""help message""" ) self.argparsersEqual(snake_case ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : int = argparse.ArgumentParser() expected.add_argument("""--foo""" ,type=snake_case ,default=snake_case ,const=snake_case ,nargs="""?""" ) expected.add_argument("""--baz""" ,type=snake_case ,default=snake_case ,const=snake_case ,nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" ,action="""store_false""" ,default=snake_case ,dest="""baz""" ) expected.add_argument("""--opt""" ,type=snake_case ,default=snake_case ) lowercase : Union[str, Any] = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(snake_case ) for dataclass_type in dataclass_types: lowercase : Optional[Any] = HfArgumentParser(snake_case ) self.argparsersEqual(snake_case ,snake_case ) lowercase : Dict = parser.parse_args([] ) self.assertEqual(snake_case ,Namespace(foo=snake_case ,baz=snake_case ,opt=snake_case ) ) lowercase : int = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(snake_case ,Namespace(foo=snake_case ,baz=snake_case ,opt=snake_case ) ) lowercase : Optional[Any] = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(snake_case ,Namespace(foo=snake_case ,baz=snake_case ,opt=snake_case ) ) lowercase : int = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(snake_case ,Namespace(foo=snake_case ,baz=snake_case ,opt=snake_case ) ) lowercase : Dict = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(snake_case ,Namespace(foo=snake_case ,baz=snake_case ,opt=snake_case ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[Any] = HfArgumentParser(snake_case ) lowercase : List[Any] = argparse.ArgumentParser() expected.add_argument( """--foo""" ,default="""toto""" ,choices=["""titi""", """toto""", 42] ,type=make_choice_type_function(["""titi""", """toto""", 42] ) ,) self.argparsersEqual(snake_case ,snake_case ) lowercase : Union[str, Any] = parser.parse_args([] ) self.assertEqual(args.foo ,"""toto""" ) lowercase : Optional[Any] = parser.parse_args_into_dataclasses([] )[0] 
self.assertEqual(enum_ex.foo ,MixedTypeEnum.toto ) lowercase : int = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo ,"""titi""" ) lowercase : List[str] = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo ,MixedTypeEnum.titi ) lowercase : Tuple = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo ,42 ) lowercase : str = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo ,MixedTypeEnum.fourtytwo ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' @dataclass class __snake_case : _a : Literal["titi", "toto", 42]= "toto" lowercase : Union[str, Any] = HfArgumentParser(snake_case ) lowercase : Any = argparse.ArgumentParser() expected.add_argument( """--foo""" ,default="""toto""" ,choices=("""titi""", """toto""", 42) ,type=make_choice_type_function(["""titi""", """toto""", 42] ) ,) self.argparsersEqual(snake_case ,snake_case ) lowercase : Any = parser.parse_args([] ) self.assertEqual(args.foo ,"""toto""" ) lowercase : str = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo ,"""titi""" ) lowercase : Optional[Any] = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo ,42 ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : str = HfArgumentParser(snake_case ) lowercase : Optional[Any] = argparse.ArgumentParser() expected.add_argument("""--foo_int""" ,nargs="""+""" ,default=[] ,type=snake_case ) expected.add_argument("""--bar_int""" ,nargs="""+""" ,default=[1, 2, 3] ,type=snake_case ) expected.add_argument("""--foo_str""" ,nargs="""+""" ,default=["""Hallo""", """Bonjour""", """Hello"""] ,type=snake_case ) expected.add_argument("""--foo_float""" ,nargs="""+""" ,default=[0.1, 0.2, 0.3] ,type=snake_case ) self.argparsersEqual(snake_case ,snake_case ) lowercase : int = parser.parse_args([] ) self.assertEqual( snake_case ,Namespace(foo_int=[] ,bar_int=[1, 2, 3] ,foo_str=["""Hallo""", """Bonjour""", """Hello"""] ,foo_float=[0.1, 0.2, 0.3] ) ,) lowercase : Optional[int] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(snake_case ,Namespace(foo_int=[1] ,bar_int=[2, 3] ,foo_str=["""a""", """b""", """c"""] ,foo_float=[0.1, 0.7] ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : str = argparse.ArgumentParser() expected.add_argument("""--foo""" ,default=snake_case ,type=snake_case ) expected.add_argument("""--bar""" ,default=snake_case ,type=snake_case ,help="""help message""" ) expected.add_argument("""--baz""" ,default=snake_case ,type=snake_case ) expected.add_argument("""--ces""" ,nargs="""+""" ,default=[] ,type=snake_case ) expected.add_argument("""--des""" ,nargs="""+""" ,default=[] ,type=snake_case ) lowercase : List[Any] = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(snake_case ) for dataclass_type in dataclass_types: lowercase : Union[str, Any] = HfArgumentParser(snake_case ) self.argparsersEqual(snake_case ,snake_case ) lowercase : Optional[int] = parser.parse_args([] ) self.assertEqual(snake_case ,Namespace(foo=snake_case ,bar=snake_case ,baz=snake_case ,ces=[] ,des=[] ) ) lowercase : Any = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(snake_case ,Namespace(foo=12 ,bar=3.14 ,baz="""42""" ,ces=["""a""", """b""", """c"""] ,des=[1, 2, 3] ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Dict = 
HfArgumentParser(snake_case ) lowercase : Any = argparse.ArgumentParser() expected.add_argument("""--required_list""" ,nargs="""+""" ,type=snake_case ,required=snake_case ) expected.add_argument("""--required_str""" ,type=snake_case ,required=snake_case ) expected.add_argument( """--required_enum""" ,type=make_choice_type_function(["""titi""", """toto"""] ) ,choices=["""titi""", """toto"""] ,required=snake_case ,) self.argparsersEqual(snake_case ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : str = HfArgumentParser(snake_case ) lowercase : Union[str, Any] = argparse.ArgumentParser() expected.add_argument("""--foo""" ,type=snake_case ,required=snake_case ) expected.add_argument( """--required_enum""" ,type=make_choice_type_function(["""titi""", """toto"""] ) ,choices=["""titi""", """toto"""] ,required=snake_case ,) expected.add_argument("""--opt""" ,type=snake_case ,default=snake_case ) expected.add_argument("""--baz""" ,default="""toto""" ,type=snake_case ,help="""help message""" ) expected.add_argument("""--foo_str""" ,nargs="""+""" ,default=["""Hallo""", """Bonjour""", """Hello"""] ,type=snake_case ) self.argparsersEqual(snake_case ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Dict = HfArgumentParser(snake_case ) lowercase : List[str] = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } lowercase : Dict = parser.parse_dict(snake_case )[0] lowercase : Any = BasicExample(**snake_case ) self.assertEqual(snake_case ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[int] = HfArgumentParser(snake_case ) lowercase : Optional[Any] = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(snake_case ,parser.parse_dict ,snake_case ,allow_extra_keys=snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[Any] = HfArgumentParser(snake_case ) lowercase : List[str] = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowercase : Dict = os.path.join(snake_case ,"""temp_json""" ) os.mkdir(snake_case ) with open(temp_local_path + """.json""" ,"""w+""" ) as f: json.dump(snake_case ,snake_case ) lowercase : Dict = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] lowercase : Any = BasicExample(**snake_case ) self.assertEqual(snake_case ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : int = HfArgumentParser(snake_case ) lowercase : Optional[Any] = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowercase : Optional[Any] = os.path.join(snake_case ,"""temp_yaml""" ) os.mkdir(snake_case ) with open(temp_local_path + """.yaml""" ,"""w+""" ) as f: yaml.dump(snake_case ,snake_case ) lowercase : List[str] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] lowercase : Any = BasicExample(**snake_case ) self.assertEqual(snake_case ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Any = HfArgumentParser(snake_case ) self.assertIsNotNone(snake_case )
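The behaviours tested above reduce to a small public API; the following self-contained sketch (the dataclass fields and CLI values are invented for illustration) shows the typical round trip from dataclass definition to parsed arguments.

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainingConfig:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    batch_size: int = 32
    run_name: str = "baseline"


parser = HfArgumentParser(TrainingConfig)
# parse_args_into_dataclasses returns one populated dataclass instance per type passed in.
(config,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--batch_size", "16"])
print(config)  # TrainingConfig(learning_rate=0.0001, batch_size=16, run_name='baseline')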
20
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def lowerCAmelCase ( __UpperCamelCase = 1_0_0_0_0_0_0 , __UpperCamelCase = 1_0 ): """simple docstring""" __A = defaultdict(__UpperCamelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __A = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __A = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__UpperCamelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 1_0 ) if __name__ == "__main__": print(F'''{solution() = }''')
266
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        """simple docstring"""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
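A hedged sketch of how this configuration is typically consumed, assuming the standard `transformers` Decision Transformer classes; the tensor shapes are illustrative.

import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=4)
model = DecisionTransformerModel(config)
model.eval()

batch_size, seq_len = 1, 5
with torch.no_grad():
    outputs = model(
        states=torch.randn(batch_size, seq_len, config.state_dim),
        actions=torch.randn(batch_size, seq_len, config.act_dim),
        rewards=torch.randn(batch_size, seq_len, 1),
        returns_to_go=torch.randn(batch_size, seq_len, 1),
        timesteps=torch.arange(seq_len).unsqueeze(0),
        attention_mask=torch.ones(batch_size, seq_len, dtype=torch.long),
    )

print(outputs.action_preds.shape)  # (batch_size, seq_len, act_dim)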
21
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : Optional[int]=2, _lowerCamelCase : Optional[int]=3, _lowerCamelCase : int=64, _lowerCamelCase : List[str]=None ): '''simple docstring''' __A = np.random.default_rng(_lowerCamelCase ) __A = length __A = rng.normal(size=(length,) ).astype(np.floataa ) __A = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa ) def __len__( self : str ): '''simple docstring''' return self.length def __getitem__( self : Dict, _lowerCamelCase : Optional[int] ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple=0, _lowerCamelCase : Any=0, _lowerCamelCase : Optional[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[Any]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a[0] + self.b[0] class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : str, _lowerCamelCase : Optional[Any]=0, _lowerCamelCase : Any=0, _lowerCamelCase : List[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a + self.b def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1_6 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __A = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __A = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} __A = load_dataset('''csv''' , data_files=__UpperCamelCase ) __A = datasets['''train'''].unique('''label''' ) __A = {v: i for i, v in enumerate(__UpperCamelCase )} def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) if "label" in examples: __A = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __A = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
__A = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=2 ) __A = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
266
0
'''simple docstring'''
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetch the list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
22
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ), } ), ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[List[List[str]]], _lowerCamelCase : List[List[str]], _lowerCamelCase : int = 1, _lowerCamelCase : int = 4, ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCamelCase, hypotheses=_lowerCamelCase, min_len=_lowerCamelCase, max_len=_lowerCamelCase ) }
266
0
'''simple docstring'''


def is_palindrome(num: int) -> bool:
    """Return True if ``num`` reads the same forwards and backwards in base 10."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
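A few illustrative calls, using the `is_palindrome` name chosen in the reconstruction above.

print(is_palindrome(121))    # True
print(is_palindrome(-121))   # False: negative numbers are rejected outright
print(is_palindrome(123))    # False
print(is_palindrome(0))      # True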
23
"""simple docstring""" class snake_case : '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : list[int] ): '''simple docstring''' __A = len(_lowerCamelCase ) __A = [0] * len_array if len_array > 0: __A = array[0] for i in range(1, _lowerCamelCase ): __A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int ): '''simple docstring''' __A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(_lowerCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
266
0
from collections.abc import Sequence from queue import Queue class SCREAMING_SNAKE_CASE__ : def __init__(self : Union[str, Any] , a__ : Any , a__ : Tuple , a__ : List[Any] , a__ : List[str]=None , a__ : Union[str, Any]=None ): """simple docstring""" __snake_case = start __snake_case = end __snake_case = val __snake_case = (start + end) // 2 __snake_case = left __snake_case = right def __repr__(self : int ): """simple docstring""" return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})""" class SCREAMING_SNAKE_CASE__ : def __init__(self : Dict , a__ : Sequence , a__ : Tuple ): """simple docstring""" __snake_case = collection __snake_case = function if self.collection: __snake_case = self._build_tree(0 , len(a__ ) - 1 ) def a (self : Optional[Any] , a__ : Dict , a__ : Tuple ): """simple docstring""" self._update_tree(self.root , a__ , a__ ) def a (self : Tuple , a__ : List[Any] , a__ : Optional[Any] ): """simple docstring""" return self._query_range(self.root , a__ , a__ ) def a (self : List[Any] , a__ : Any , a__ : List[Any] ): """simple docstring""" if start == end: return SegmentTreeNode(a__ , a__ , self.collection[start] ) __snake_case = (start + end) // 2 __snake_case = self._build_tree(a__ , a__ ) __snake_case = self._build_tree(mid + 1 , a__ ) return SegmentTreeNode(a__ , a__ , self.fn(left.val , right.val ) , a__ , a__ ) def a (self : str , a__ : Any , a__ : List[str] , a__ : Tuple ): """simple docstring""" if node.start == i and node.end == i: __snake_case = val return if i <= node.mid: self._update_tree(node.left , a__ , a__ ) else: self._update_tree(node.right , a__ , a__ ) __snake_case = self.fn(node.left.val , node.right.val ) def a (self : Dict , a__ : Any , a__ : List[Any] , a__ : str ): """simple docstring""" if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , a__ , a__ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , a__ , node.mid ) , self._query_range(node.right , node.mid + 1 , a__ ) , ) else: # range in right child tree return self._query_range(node.right , a__ , a__ ) def a (self : Dict ): """simple docstring""" if self.root is not None: __snake_case = Queue() queue.put(self.root ) while not queue.empty(): __snake_case = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('*' * 50) snake_case_ = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
24
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowercase_ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowercase_ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase ( ): """simple docstring""" __A = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __A = bs[:] __A = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCamelCase ) cs.append(2**8 + n ) n += 1 __A = [chr(__UpperCamelCase ) for n in cs] return dict(zip(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = set() __A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __A = char return pairs class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = VOCAB_FILES_NAMES A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : Dict="replace", _lowerCamelCase : Any="<s>", _lowerCamelCase : Optional[int]="</s>", _lowerCamelCase : Dict="</s>", _lowerCamelCase : List[Any]="<s>", _lowerCamelCase : List[str]="<unk>", _lowerCamelCase : str="<pad>", _lowerCamelCase : Any="<mask>", _lowerCamelCase : Any=False, **_lowerCamelCase : Tuple, ): '''simple docstring''' __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else bos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else eos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else sep_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else cls_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token super().__init__( errors=_lowerCamelCase, bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, add_prefix_space=_lowerCamelCase, **_lowerCamelCase, ) with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) __A = {v: k for k, v in self.encoder.items()} __A = errors # how to handle errors in decoding __A = bytes_to_unicode() __A = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCamelCase, encoding='''utf-8''' ) as merges_handle: __A = merges_handle.read().split('''\n''' )[1:-1] __A = [tuple(merge.split() ) for merge in bpe_merges] __A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) ) __A = {} __A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder, **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] __A = tuple(_lowerCamelCase ) __A = get_pairs(_lowerCamelCase ) if not pairs: return token while True: __A = min(_lowerCamelCase, key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __A , __A = bigram __A = [] __A = 0 while i < len(_lowerCamelCase ): try: __A = word.index(_lowerCamelCase, _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __A = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __A = tuple(_lowerCamelCase ) __A = new_word if len(_lowerCamelCase ) == 1: break else: __A = get_pairs(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = word return word def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Dict ): '''simple docstring''' __A = [] for token in re.findall(self.pat, _lowerCamelCase ): __A = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) ) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' return self.encoder.get(_lowerCamelCase, self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ): '''simple docstring''' return self.decoder.get(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = ''''''.join(_lowerCamelCase ) __A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors ) return 
text def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowerCamelCase, ensure_ascii=_lowerCamelCase ) + '''\n''' ) __A = 0 with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ''' Please check that the tokenizer is not corrupted!''' ) __A = token_index writer.write(''' '''.join(_lowerCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str]=False, **_lowerCamelCase : List[Any] ): '''simple docstring''' __A = kwargs.pop('''add_prefix_space''', self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()): __A = ''' ''' + text return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ): '''simple docstring''' __A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __A = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
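For orientation, a minimal sketch of how this tokenizer is usually obtained and used through the published checkpoint; the comment about the decoded output is approximate.

from transformers import BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
encoded = tokenizer("Hello, how are you?")
# build_inputs_with_special_tokens above only appends the EOS token, so decoding
# yields essentially the original text followed by </s>.
print(tokenizer.decode(encoded.input_ids))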
266
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase__ : int = logging.get_logger(__name__) UpperCAmelCase__ : Optional[Any] = { 'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json', # See all Nat models at https://huggingface.co/models?filter=nat } class lowerCAmelCase_ (a__ , a__ ): """simple docstring""" __UpperCamelCase : Dict = '''nat''' __UpperCamelCase : Tuple = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__(self , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=64 , SCREAMING_SNAKE_CASE__=[3, 4, 6, 5] , SCREAMING_SNAKE_CASE__=[2, 4, 8, 16] , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=3.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Dict: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = patch_size SCREAMING_SNAKE_CASE__ : Tuple = num_channels SCREAMING_SNAKE_CASE__ : List[str] = embed_dim SCREAMING_SNAKE_CASE__ : Dict = depths SCREAMING_SNAKE_CASE__ : List[str] = len(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = num_heads SCREAMING_SNAKE_CASE__ : Any = kernel_size SCREAMING_SNAKE_CASE__ : int = mlp_ratio SCREAMING_SNAKE_CASE__ : str = qkv_bias SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any = drop_path_rate SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE__ : Optional[int] = layer_norm_eps SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model SCREAMING_SNAKE_CASE__ : Optional[int] = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) ) SCREAMING_SNAKE_CASE__ : Dict = layer_scale_init_value SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(SCREAMING_SNAKE_CASE__ ) + 1 )] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = get_aligned_output_features_output_indices( out_features=SCREAMING_SNAKE_CASE__ , out_indices=SCREAMING_SNAKE_CASE__ , stage_names=self.stage_names )
25
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging lowercase_ = ( 'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py' ) lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCAmelCase ( ): """simple docstring""" __A = '''https://pypi.org/pypi/diffusers/json''' __A = json.loads(request.urlopen(__UpperCamelCase ).read() )['''releases'''].keys() return sorted(__UpperCamelCase , key=lambda __UpperCamelCase : version.Version(__UpperCamelCase ) ) def lowerCAmelCase ( ): """simple docstring""" if HF_MODULES_CACHE in sys.path: return sys.path.append(__UpperCamelCase ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = Path(__UpperCamelCase ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" init_hf_modules() __A = Path(__UpperCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import .xxx` __A = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = False __A = [module_file] __A = [] # Let's recurse through all relative imports while not no_change: __A = [] for f in files_to_check: new_imports.extend(get_relative_imports(__UpperCamelCase ) ) __A = Path(__UpperCamelCase ).parent __A = [str(module_path / m ) for m in new_imports] __A = [f for f in new_import_files if f not in all_relative_imports] __A = [f'{f}.py' for f in new_import_files] __A = len(__UpperCamelCase ) == 0 all_relative_imports.extend(__UpperCamelCase ) return all_relative_imports def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import xxx` __A = re.findall('''^\s*import\s+(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Only keep the top-level module __A = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all __A = list(set(__UpperCamelCase ) ) __A = [] for imp in imports: try: importlib.import_module(__UpperCamelCase ) except ImportError: missing_packages.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' f'{", 
".join(__UpperCamelCase )}. Run `pip install {" ".join(__UpperCamelCase )}`' ) return get_relative_imports(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = module_path.replace(os.path.sep , '''.''' ) __A = importlib.import_module(__UpperCamelCase ) if class_name is None: return find_pipeline_class(__UpperCamelCase ) return getattr(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" from ..pipelines import DiffusionPipeline __A = dict(inspect.getmembers(__UpperCamelCase , inspect.isclass ) ) __A = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , __UpperCamelCase ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' f' {loaded_module}.' ) __A = cls return pipeline_class def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ): """simple docstring""" __A = str(__UpperCamelCase ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): __A = module_file_or_url __A = '''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: __A = get_diffusers_versions() # cut ".dev0" __A = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: __A = latest_version if latest_version[1:] in available_versions else '''main''' logger.info(f'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: __A = f'v{revision}' elif revision == "main": __A = revision else: raise ValueError( f'`custom_revision`: {revision} does not exist. Please make sure to choose one of' f' {", ".join(available_versions + ["main"] )}.' ) # community pipeline on GitHub __A = COMMUNITY_PIPELINES_URL.format(revision=__UpperCamelCase , pipeline=__UpperCamelCase ) try: __A = cached_download( __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = '''git''' __A = pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise else: try: # Load from URL or cache if already cached __A = hf_hub_download( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment __A = check_imports(__UpperCamelCase ) # Now we move the module inside our cached dynamic modules. 
__A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(__UpperCamelCase ) __A = Path(__UpperCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(__UpperCamelCase , submodule_path / module_file ) for module_needed in modules_needed: __A = f'{module_needed}.py' shutil.copy(os.path.join(__UpperCamelCase , __UpperCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(__UpperCamelCase , __UpperCamelCase ): __A = use_auth_token elif use_auth_token is True: __A = HfFolder.get_token() else: __A = None __A = model_info(__UpperCamelCase , revision=__UpperCamelCase , token=__UpperCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. __A = submodule_path / commit_hash __A = full_submodule + os.path.sep + commit_hash create_dynamic_module(__UpperCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(__UpperCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( __UpperCamelCase , f'{module_needed}.py' , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return os.path.join(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , **__UpperCamelCase , ): """simple docstring""" __A = get_cached_module_file( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return get_class_in_module(__UpperCamelCase , final_module.replace('''.py''' , '''''' ) )
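A minimal, standalone sketch of the relative-import scan the loader above performs (the helper itself is name-mangled in this row); the two regexes are taken from the source, while `find_relative_imports` and the sample string are illustrative names only.

import re

def find_relative_imports(source: str) -> list:
    # `import .xxx` style statements
    relative = re.findall(r"^\s*import\s+\.(\S+)\s*$", source, flags=re.MULTILINE)
    # `from .xxx import yyy` style statements
    relative += re.findall(r"^\s*from\s+\.(\S+)\s+import", source, flags=re.MULTILINE)
    # unique-ify, mirroring the loader above
    return sorted(set(relative))

sample = "from .pipeline_utils import DiffusionPipeline\nfrom .schedulers import DDIMScheduler\n"
assert find_relative_imports(sample) == ["pipeline_utils", "schedulers"]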
266
0
from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a=None , _a=None , _a=0 ) -> List[Any]: _A : int = 1.0 if scale is None else scale _A : Dict = 0.0 if loc is None else loc super().__init__(_a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_a )] ) @property def a__ ( self ) -> Tuple: return self.base_dist.mean * self.scale + self.loc @property def a__ ( self ) -> int: return self.base_dist.variance * self.scale**2 @property def a__ ( self ) -> Tuple: return self.variance.sqrt() class lowercase ( nn.Module ): def __init__( self , _a , _a , _a , **_a ) -> None: super().__init__(**_a ) _A : List[Any] = args_dim _A : Optional[Any] = nn.ModuleList([nn.Linear(_a , _a ) for dim in args_dim.values()] ) _A : Dict = domain_map def a__ ( self , _a ) -> Tuple[torch.Tensor]: _A : Optional[Any] = [proj(_a ) for proj in self.proj] return self.domain_map(*_a ) class lowercase ( nn.Module ): def __init__( self , _a ) -> List[Any]: super().__init__() _A : Any = function def a__ ( self , _a , *_a ) -> Optional[int]: return self.function(_a , *_a ) class lowercase : _a = 42 _a = 42 _a = 42 def __init__( self , _a = 1 ) -> None: _A : Tuple = dim _A : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim} def a__ ( self , _a ) -> Optional[int]: if self.dim == 1: return self.distribution_class(*_a ) else: return Independent(self.distribution_class(*_a ) , 1 ) def a__ ( self , _a , _a = None , _a = None , ) -> Distribution: _A : Union[str, Any] = self._base_distribution(_a ) if loc is None and scale is None: return distr else: return AffineTransformed(_a , loc=_a , scale=_a , event_dim=self.event_dim ) @property def a__ ( self ) -> Tuple: return () if self.dim == 1 else (self.dim,) @property def a__ ( self ) -> int: return len(self.event_shape ) @property def a__ ( self ) -> float: return 0.0 def a__ ( self , _a ) -> nn.Module: return ParameterProjection( in_features=_a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def a__ ( self , *_a ) -> Dict: raise NotImplementedError() @staticmethod def a__ ( _a ) -> torch.Tensor: return (x + torch.sqrt(torch.square(_a ) + 4.0 )) / 2.0 class lowercase ( UpperCamelCase__ ): _a = {"df": 1, "loc": 1, "scale": 1} _a = StudentT @classmethod def a__ ( cls , _a , _a , _a ) -> int: _A : int = cls.squareplus(_a ).clamp_min(torch.finfo(scale.dtype ).eps ) _A : Any = 2.0 + cls.squareplus(_a ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class lowercase ( UpperCamelCase__ ): _a = {"loc": 1, "scale": 1} _a = Normal @classmethod def a__ ( cls , _a , _a ) -> List[Any]: _A : List[str] = cls.squareplus(_a ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class lowercase ( UpperCamelCase__ ): _a = {"total_count": 1, "logits": 1} _a = NegativeBinomial @classmethod def a__ ( cls , _a , _a ) -> Union[str, Any]: _A : List[str] = cls.squareplus(_a ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def a__ ( self , _a ) -> Distribution: _A , _A : Tuple = distr_args if self.dim == 1: return self.distribution_class(total_count=_a , logits=_a ) else: return Independent(self.distribution_class(total_count=_a , logits=_a ) , 1 ) def a__ ( self , _a , _a = None , _a = None ) -> Distribution: _A , _A : List[Any] = distr_args if scale is not None: # See scaling 
property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
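A hedged, self-contained sketch of the pattern the classes above implement: project network features to distribution parameters through per-parameter linear layers, map them into a valid domain with `squareplus`, and build a `StudentT`; the variable names here are illustrative, not the mangled classes in this row.

import torch
from torch import nn
from torch.distributions import StudentT

def squareplus(x: torch.Tensor) -> torch.Tensor:
    # smooth map to positive values, as used by the domain maps above
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

batch, in_features = 8, 32
proj = nn.ModuleDict({name: nn.Linear(in_features, 1) for name in ("df", "loc", "scale")})
h = torch.randn(batch, in_features)

scale = squareplus(proj["scale"](h)).clamp_min(torch.finfo(h.dtype).eps).squeeze(-1)
df = (2.0 + squareplus(proj["df"](h))).squeeze(-1)
loc = proj["loc"](h).squeeze(-1)

dist = StudentT(df=df, loc=loc, scale=scale)  # one univariate distribution per batch element
sample = dist.sample()                        # shape: (batch,)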
26
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ): __A = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sgugger/tiny-distilbert-classification''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) # set architectures equal to `None` __A = None __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], 
training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, save_to_csv=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ), 
inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ), env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ), multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ): self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ), log_print=_lowerCamelCase, trace_memory_line_by_line=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
266
0
'''simple docstring''' from importlib import import_module from .logging import get_logger __lowercase : Tuple = get_logger(__name__) class __UpperCamelCase : def __init__( self , __a , __a=None ): '''simple docstring''' __a : List[Any] = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('__' ): setattr(self , __a , getattr(__a , __a ) ) __a : Dict = module._original_module if isinstance(__a , _PatchedModuleObj ) else module class __UpperCamelCase : A_ = [] def __init__( self , __a , __a , __a , __a=None ): '''simple docstring''' __a : int = obj __a : Union[str, Any] = target __a : int = new __a : Dict = target.split('.' )[0] __a : Any = {} __a : Any = attrs or [] def __enter__( self ): '''simple docstring''' *__a , __a : Dict = self.target.split('.' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(__a ) ): try: __a : str = import_module('.'.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __a : Optional[int] = getattr(self.obj , __a ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(__a , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __a : List[str] = obj_attr # patch at top level setattr(self.obj , __a , _PatchedModuleObj(__a , attrs=self.attrs ) ) __a : List[str] = getattr(self.obj , __a ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(__a , __a , _PatchedModuleObj(getattr(__a , __a , __a ) , attrs=self.attrs ) ) __a : str = getattr(__a , __a ) # finally set the target attribute setattr(__a , __a , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __a : Any = getattr(import_module('.'.join(__a ) ) , __a ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , __a ) is attr_value: __a : List[Any] = getattr(self.obj , __a ) setattr(self.obj , __a , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __a : str = globals()['__builtins__'][target_attr] setattr(self.obj , __a , self.new ) else: raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self , *__a ): '''simple docstring''' for attr in list(self.original ): setattr(self.obj , __a , self.original.pop(__a ) ) def __UpperCAmelCase ( self ): '''simple docstring''' self.__enter__() self._active_patches.append(self ) def __UpperCAmelCase ( self ): '''simple docstring''' try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
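A hedged, minimal illustration of the core idea behind the patcher above: swap an attribute on enter and always restore the original on exit; `patch_attr` and `Loader` are illustrative names, not the class defined in this row.

import os
from contextlib import contextmanager

@contextmanager
def patch_attr(obj, name, new):
    original = getattr(obj, name)
    setattr(obj, name, new)
    try:
        yield
    finally:
        # always restore, mirroring the __exit__ bookkeeping above
        setattr(obj, name, original)

class Loader:
    join = staticmethod(os.path.join)

with patch_attr(Loader, "join", lambda *parts: "/".join(parts)):
    assert Loader.join("a", "b") == "a/b"
assert Loader.join("a", "b") == os.path.join("a", "b")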
27
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = PegasusTokenizer A_ : int = PegasusTokenizerFast A_ : Optional[Any] = True A_ : Union[str, Any] = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def _SCREAMING_SNAKE_CASE ( self : int, **_lowerCamelCase : List[Any] ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = '''</s>''' __A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ), _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<pad>''' ) self.assertEqual(vocab_keys[1], '''</s>''' ) self.assertEqual(vocab_keys[-1], '''v''' ) self.assertEqual(len(_lowerCamelCase ), 11_03 ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 11_03 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word __A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' __A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_61_03 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_03 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05 assert tokenizer.unk_token == 
"<unk>" assert tokenizer.model_max_length == 10_24 __A = '''To ensure a smooth flow of bank resolutions.''' __A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 1_50, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 10_24) assert batch.attention_mask.shape == (2, 10_24) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # fmt: off __A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase, model_name='''google/bigbird-pegasus-large-arxiv''', revision='''ba85d0851d708441f91440d509690f1ab6353415''', ) @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , 
unittest.TestCase ): '''simple docstring''' A_ : str = PegasusTokenizer A_ : Union[str, Any] = PegasusTokenizerFast A_ : Any = True A_ : str = True def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase, offset=0, mask_token_sent=_lowerCamelCase, mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], **_lowerCamelCase : Dict ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 10_00, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 40_96) assert batch.attention_mask.shape == (2, 40_96) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) __A = self._large_tokenizer(_lowerCamelCase ).input_ids self.assertListEqual( _lowerCamelCase, [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1], )
266
0
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow def A ( self : Optional[int] ): """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(UpperCamelCase__ ): UpperCamelCase = AutoConfig.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = FlaxAutoModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) @slow def A ( self : List[Any] ): """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(UpperCamelCase__ ): UpperCamelCase = AutoConfig.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = FlaxAutoModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) @slow def A ( self : Union[str, Any] ): """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: UpperCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ ) UpperCamelCase = FlaxBertModel.from_pretrained(UpperCamelCase__ ) UpperCamelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX ) @jax.jit def eval(**UpperCamelCase__ : str ): return model(**UpperCamelCase__ ) eval(**UpperCamelCase__ ).block_until_ready() @slow def A ( self : List[Any] ): """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: UpperCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ ) UpperCamelCase = FlaxRobertaModel.from_pretrained(UpperCamelCase__ ) UpperCamelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX ) @jax.jit def eval(**UpperCamelCase__ : str ): return model(**UpperCamelCase__ ) eval(**UpperCamelCase__ ).block_until_ready() def A ( self : Union[str, Any] ): """simple docstring""" with self.assertRaisesRegex( UpperCamelCase__ , 'bert-base is not a local folder and is not a valid model identifier' ): UpperCamelCase = FlaxAutoModel.from_pretrained('bert-base' ) def A ( self : List[Any] ): """simple docstring""" with self.assertRaisesRegex( UpperCamelCase__ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): UpperCamelCase = FlaxAutoModel.from_pretrained(UpperCamelCase__ , revision='aaaaaa' ) def A ( self : Union[str, Any] ): """simple docstring""" with self.assertRaisesRegex( UpperCamelCase__ , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ): UpperCamelCase = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' ) def A ( self : str ): """simple docstring""" with self.assertRaisesRegex(UpperCamelCase__ , 'Use `from_pt=True` to load this model' ): UpperCamelCase = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
28
"""simple docstring""" import re def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )] def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = split_input(str_ ) return "".join( [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" try: __A = split_input(__UpperCamelCase ) if upper: __A = ''''''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __A = ''''''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return to_simple_case(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" try: __A = to_simple_case(__UpperCamelCase ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''_''' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''-''' ) if __name__ == "__main__": __import__('doctest').testmod()
266
0
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json', } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Dict = '''xlnet''' _snake_case : Optional[int] = ['''mems'''] _snake_case : List[str] = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , _UpperCamelCase=3_2_0_0_0 , _UpperCamelCase=1_0_2_4 , _UpperCamelCase=2_4 , _UpperCamelCase=1_6 , _UpperCamelCase=4_0_9_6 , _UpperCamelCase="gelu" , _UpperCamelCase=True , _UpperCamelCase="bi" , _UpperCamelCase=0.02 , _UpperCamelCase=1E-12 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=-1 , _UpperCamelCase=False , _UpperCamelCase="last" , _UpperCamelCase=True , _UpperCamelCase="tanh" , _UpperCamelCase=0.1 , _UpperCamelCase=5 , _UpperCamelCase=5 , _UpperCamelCase=5 , _UpperCamelCase=1 , _UpperCamelCase=2 , **_UpperCamelCase , ) -> Any: UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : int = d_model UpperCAmelCase_ : Any = n_layer UpperCAmelCase_ : Tuple = n_head if d_model % n_head != 0: raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" ) UpperCAmelCase_ : Dict = d_model // n_head UpperCAmelCase_ : int = ff_activation UpperCAmelCase_ : Tuple = d_inner UpperCAmelCase_ : Any = untie_r UpperCAmelCase_ : Optional[int] = attn_type UpperCAmelCase_ : Optional[int] = initializer_range UpperCAmelCase_ : Union[str, Any] = layer_norm_eps UpperCAmelCase_ : int = dropout UpperCAmelCase_ : Optional[int] = mem_len UpperCAmelCase_ : str = reuse_len UpperCAmelCase_ : List[Any] = bi_data UpperCAmelCase_ : Tuple = clamp_len UpperCAmelCase_ : Dict = same_length UpperCAmelCase_ : int = summary_type UpperCAmelCase_ : Optional[Any] = summary_use_proj UpperCAmelCase_ : List[str] = summary_activation UpperCAmelCase_ : Dict = summary_last_dropout UpperCAmelCase_ : str = start_n_top UpperCAmelCase_ : str = end_n_top UpperCAmelCase_ : Any = bos_token_id UpperCAmelCase_ : Tuple = pad_token_id UpperCAmelCase_ : str = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCamelCase , ) UpperCAmelCase_ : Any = kwargs['use_cache'] UpperCAmelCase_ : Union[str, Any] = use_mems_eval UpperCAmelCase_ : Union[str, Any] = use_mems_train super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) @property def __UpperCAmelCase ( self ) -> Tuple: logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." ) return -1 @max_position_embeddings.setter def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]: # Message copied from Transformer-XL documentation raise NotImplementedError( f"The model {self.model_type} is one of the few models that has no sequence length limit." )
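A hedged usage sketch, assuming the class above mirrors the standard `transformers.XLNetConfig`: the constructor derives `d_head` from `d_model // n_head` and rejects inconsistent values, shown here with illustrative sizes.

from transformers import XLNetConfig

# d_model must be divisible by n_head, exactly as checked in the constructor above
config = XLNetConfig(vocab_size=32000, d_model=1024, n_layer=24, n_head=16)
assert config.d_head == 1024 // 16  # derived, not passed explicitly

try:
    XLNetConfig(d_model=1000, n_head=16)  # 1000 % 16 != 0 -> rejected
except ValueError as err:
    print(err)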
29
"""simple docstring""" from __future__ import annotations class snake_case : '''simple docstring''' def __init__( self : int, _lowerCamelCase : List[Any]=None ): '''simple docstring''' __A = data __A = None def __repr__( self : Union[str, Any] ): '''simple docstring''' __A = [] __A = self while temp: string_rep.append(f'{temp.data}' ) __A = temp.next return "->".join(_lowerCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not elements_list: raise Exception('''The Elements List is empty''' ) __A = __A = Node(elements_list[0] ) for i in range(1 , len(__UpperCamelCase ) ): __A = Node(elements_list[i] ) __A = current.next return head def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def lowerCAmelCase ( ): """simple docstring""" from doctest import testmod testmod() __A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print('''Linked List:''' ) print(__UpperCamelCase ) print('''Elements in Reverse:''' ) print_reverse(__UpperCamelCase ) if __name__ == "__main__": main()
266
0
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class lowercase__( UpperCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple=0.01 , SCREAMING_SNAKE_CASE_ : int=1_0_0_0 ) -> Tuple: lowercase_ = p_stop lowercase_ = max_length def __iter__( self : List[Any] ) -> Dict: lowercase_ = 0 lowercase_ = False while not stop and count < self.max_length: yield count count += 1 lowercase_ = random.random() < self.p_stop class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Dict=True ) -> List[str]: lowercase_ = [ BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) for i in range(2 ) ] lowercase_ = [list(SCREAMING_SNAKE_CASE_ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(SCREAMING_SNAKE_CASE_ ) for shard in batch_sampler_shards] , [len(SCREAMING_SNAKE_CASE_ ) for e in expected] ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Dict ) -> Optional[Any]: # Check the shards when the dataset is a round multiple of total batch size. lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) # Expected shouldn't change self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is very small. lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [[], []] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Tuple ) -> List[str]: # Check the shards when the dataset is a round multiple of batch size. lowercase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) # Expected shouldn't change self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is not a round multiple of batch size. lowercase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. 
lowercase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is very small. lowercase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [[], []] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Dict ) -> List[Any]: # Check the shards when the dataset is a round multiple of total batch size. lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) # Expected shouldn't change self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is very small. lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [[[0, 1]], []] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [[], []] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> Optional[int]: # Check the shards when the dataset is a round multiple of batch size. lowercase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) # Expected shouldn't change self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is not a round multiple of batch size. lowercase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. 
lowercase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) # Check the shards when the dataset is very small. lowercase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [[[0, 1]], []] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) lowercase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = [[], []] self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Tuple ) -> str: lowercase_ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]] lowercase_ = [BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] ) def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> Any: random.seed(SCREAMING_SNAKE_CASE_ ) lowercase_ = list(SCREAMING_SNAKE_CASE_ ) lowercase_ = [ IterableDatasetShard( SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , drop_last=SCREAMING_SNAKE_CASE_ , num_processes=SCREAMING_SNAKE_CASE_ , process_index=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , ) for i in range(SCREAMING_SNAKE_CASE_ ) ] lowercase_ = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(SCREAMING_SNAKE_CASE_ ) iterable_dataset_lists.append(list(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size lowercase_ = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(len(SCREAMING_SNAKE_CASE_ ) % shard_batch_size == 0 ) lowercase_ = [] for idx in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(SCREAMING_SNAKE_CASE_ ) < len(SCREAMING_SNAKE_CASE_ ): reference += reference self.assertListEqual(SCREAMING_SNAKE_CASE_ , reference[: len(SCREAMING_SNAKE_CASE_ )] ) def _lowercase ( self : Any ) -> Union[str, Any]: lowercase_ = 4_2 lowercase_ = RandomIterableDataset() self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) # Edge case with a very small dataset lowercase_ = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ ) lowercase_ = SkipBatchSampler(SCREAMING_SNAKE_CASE_ , 2 ) self.assertListEqual(list(SCREAMING_SNAKE_CASE_ ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] ) def _lowercase ( self : Any ) -> Any: lowercase_ = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] ) def _lowercase ( self : Any ) -> Optional[int]: lowercase_ = DataLoader(list(range(1_6 ) ) , batch_size=4 ) lowercase_ = skip_first_batches(SCREAMING_SNAKE_CASE_ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] ) def _lowercase ( self : Any ) -> Optional[int]: lowercase_ = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 ) for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ): 
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def _lowercase ( self : Any ) -> Any: Accelerator() lowercase_ = DataLoaderDispatcher(range(1_6 ) , batch_size=4 ) for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
30
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : int = ["input_features", "attention_mask"] def __init__( self : Optional[Any], _lowerCamelCase : Union[str, Any]=80, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Any=80, _lowerCamelCase : List[str]=0.0, _lowerCamelCase : int=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]=True, **_lowerCamelCase : List[str], ): '''simple docstring''' super().__init__(feature_size=_lowerCamelCase, sampling_rate=_lowerCamelCase, padding_value=_lowerCamelCase, **_lowerCamelCase ) __A = num_mel_bins __A = do_ceptral_normalize __A = normalize_means __A = normalize_vars __A = True def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : np.ndarray, ): '''simple docstring''' __A = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __A = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 ) __A = ta_kaldi.fbank(_lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray, _lowerCamelCase : int, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : float = 0.0, ): '''simple docstring''' # make sure we normalize float32 arrays if normalize_means: __A = x[:input_length].mean(axis=0 ) __A = np.subtract(_lowerCamelCase, _lowerCamelCase ) if normalize_vars: __A = x[:input_length].std(axis=0 ) __A = np.divide(_lowerCamelCase, _lowerCamelCase ) if input_length < x.shape[0]: __A = padding_value # make sure array is in float32 __A = x.astype(np.floataa ) return x def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[np.ndarray], _lowerCamelCase : Optional[np.ndarray] = None ): '''simple docstring''' __A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_lowerCamelCase, _lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value ) for x, n in zip(_lowerCamelCase, _lowerCamelCase ) ] def __call__( self : Optional[Any], _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _lowerCamelCase : Union[bool, str, PaddingStrategy] = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : bool = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[bool] = None, **_lowerCamelCase : Optional[Any], ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __A = isinstance(_lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __A = is_batched_numpy or ( isinstance(_lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCamelCase, np.ndarray ): __A = np.asarray(_lowerCamelCase, dtype=np.floataa ) elif isinstance(_lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __A = [raw_speech] # extract fbank features __A = [self._extract_fbank_features(_lowerCamelCase ) for waveform in raw_speech] # convert into correct format for padding __A = BatchFeature({'''input_features''': features} ) __A = self.pad( _lowerCamelCase, padding=_lowerCamelCase, max_length=_lowerCamelCase, truncation=_lowerCamelCase, pad_to_multiple_of=_lowerCamelCase, return_attention_mask=_lowerCamelCase, **_lowerCamelCase, ) # make sure list is in array format __A = padded_inputs.get('''input_features''' ) if isinstance(input_features[0], _lowerCamelCase ): __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for feature in input_features] __A = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: __A = [np.asarray(_lowerCamelCase, dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __A = ( np.array(_lowerCamelCase, dtype=np.intaa ) if self._get_padding_strategies(_lowerCamelCase, max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __A = self.normalize( padded_inputs['''input_features'''], attention_mask=_lowerCamelCase ) if return_tensors is not None: __A = padded_inputs.convert_to_tensors(_lowerCamelCase ) return padded_inputs
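The utterance-level CMVN step applied by the extractor above is a per-dimension mean/variance normalisation computed over the unpadded frames only; a minimal standalone NumPy sketch of that step (illustrative names, not the library API) looks like this.

import numpy as np


def utterance_cmvn_sketch(features, input_length, padding_value=0.0):
    # features: (num_frames, num_mel_bins) fbank matrix, possibly padded at the end
    mean = features[:input_length].mean(axis=0)
    normed = features - mean
    std = normed[:input_length].std(axis=0)
    normed = normed / std
    # padded frames are reset to the padding value after normalisation
    if input_length < features.shape[0]:
        normed[input_length:] = padding_value
    return normed.astype(np.float32)


frames = np.random.randn(100, 80).astype(np.float32)
out = utterance_cmvn_sketch(frames, input_length=90)
assert out.shape == (100, 80)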
266
0
"""Generate the terms of a P-Series: 1 + 1/2^p + 1/3^p + ... + 1/n^p."""
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the P-Series up to the nth term, each term rendered as a string."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        # First term is "1"; subsequent terms are "1 / (k^power)"
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
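A few concrete values for the p_series helper above, shown as plain assertions:

assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]
assert p_series("", 1000) == [""]  # empty nth_term short-circuits
assert p_series(-5, 2) == []       # a negative count produces an empty series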
31
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=13, _lowerCamelCase : Any=3, _lowerCamelCase : Optional[int]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Any=[0.5, 0.5, 0.5], _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = do_normalize __A = image_mean __A = image_std def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = ViTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = EfficientFormerImageProcessorTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''image_std''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_proc_tester, 
equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), )
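The shape assertions above all come down to the usual resize, rescale, normalise, channels-first image pipeline; the sketch below is a standalone illustration (hypothetical preprocess_sketch helper, with the 18x18 size and 0.5 mean/std taken from the tester defaults), not the library implementation.

import numpy as np
from PIL import Image


def preprocess_sketch(image, size=(18, 18), mean=0.5, std=0.5):
    # resize, scale to [0, 1], normalise, then move channels first: (3, H, W)
    resized = image.convert("RGB").resize(size)
    arr = np.asarray(resized).astype(np.float32) / 255.0
    arr = (arr - mean) / std
    return arr.transpose(2, 0, 1)


img = Image.new("RGB", (32, 48), color=(128, 64, 255))
pixel_values = preprocess_sketch(img)[None]  # add a batch dimension
assert pixel_values.shape == (1, 3, 18, 18)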
266
0
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase_ : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class SCREAMING_SNAKE_CASE__ : snake_case__ : Any = PegasusConfig snake_case__ : Any = {} snake_case__ : str = '''gelu''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_3 , SCREAMING_SNAKE_CASE__ : List[Any]=7 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Tuple=9_9 , SCREAMING_SNAKE_CASE__ : Any=3_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5 , SCREAMING_SNAKE_CASE__ : Tuple=4 , SCREAMING_SNAKE_CASE__ : str=3_7 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : str=2_0 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Any=0 , ) -> Optional[Any]: a_ : List[Any] = parent a_ : Optional[Any] = batch_size a_ : Dict = seq_length a_ : Optional[int] = is_training a_ : List[Any] = use_labels a_ : List[str] = vocab_size a_ : str = hidden_size a_ : Tuple = num_hidden_layers a_ : List[str] = num_attention_heads a_ : Tuple = intermediate_size a_ : List[str] = hidden_dropout_prob a_ : List[Any] = attention_probs_dropout_prob a_ : List[Any] = max_position_embeddings a_ : List[str] = eos_token_id a_ : Optional[Any] = pad_token_id a_ : Union[str, Any] = bos_token_id def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: a_ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) a_ : List[Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) a_ : List[str] = np.concatenate([input_ids, eos_tensor] , axis=1 ) a_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Optional[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) a_ : Optional[Any] = prepare_pegasus_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: a_ : Any = 2_0 a_ : int = model_class_name(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] 
= model.encode(inputs_dict['input_ids'] ) a_ , a_ : List[Any] = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) a_ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : Dict = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) a_ : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) a_ : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , ) a_ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) a_ : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE__ , ) a_ : Dict = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]: a_ : Tuple = 2_0 a_ : Optional[int] = model_class_name(SCREAMING_SNAKE_CASE__ ) a_ : int = model.encode(inputs_dict['input_ids'] ) a_ , a_ : Dict = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) a_ : Optional[int] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) a_ : str = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) a_ : List[str] = model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , ) a_ : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) a_ : Any = model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , ) a_ : Optional[int] = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def SCREAMING_SNAKE_CASE_ ( __A : Dict , __A : int , __A : Union[str, Any] , __A : Optional[int]=None , __A : str=None , ) -> List[Any]: """simple docstring""" if attention_mask is None: a_ : List[str] = np.not_equal(__A , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: a_ : str = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, 
"attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : List[Any] = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) snake_case__ : Optional[Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () snake_case__ : Optional[Any] = True snake_case__ : List[str] = False snake_case__ : str = False snake_case__ : Any = False def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: a_ : Any = FlaxPegasusModelTester(self ) a_ : Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: a_ , a_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> str: a_ , a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: a_ , a_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): a_ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__ ) @jax.jit def encode_jitted(SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple=None , **SCREAMING_SNAKE_CASE__ : Any ): return model.encode(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ) with self.subTest('JIT Enabled' ): a_ : Any = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): a_ : Optional[int] = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertEqual(jitted_output.shape , output.shape ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: a_ , a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): a_ : List[str] = model_class(SCREAMING_SNAKE_CASE__ ) a_ : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) a_ : str = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ): return model.decode( decoder_input_ids=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , encoder_outputs=SCREAMING_SNAKE_CASE__ , ) with self.subTest('JIT Enabled' ): a_ : List[str] = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): a_ : Dict = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for 
jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: for model_class_name in self.all_model_classes: a_ : Dict = model_class_name.from_pretrained('google/pegasus-large' , from_pt=SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = np.ones((1, 1) ) a_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: a_ : List[Any] = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' ) a_ : List[Any] = PegasusTokenizer.from_pretrained('google/pegasus-xsum' ) a_ : Optional[Any] = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] a_ : Union[str, Any] = [ 'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.', 'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.', ] a_ : Any = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='np' , truncation=SCREAMING_SNAKE_CASE__ , max_length=5_1_2 , padding=SCREAMING_SNAKE_CASE__ ) a_ : List[str] = model.generate(**SCREAMING_SNAKE_CASE__ , num_beams=2 ).sequences a_ : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ ) assert tgt_text == decoded
32
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int], *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : Dict ): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''', _lowerCamelCase, ) super().__init__(*_lowerCamelCase, **_lowerCamelCase )
266
0
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _UpperCAmelCase : def __init__( self : List[str] , A : List[str] , A : List[str]=2 , A : Tuple=True , A : Union[str, Any]=False , A : Optional[int]=10 , A : Dict=3 , A : Union[str, Any]=32 * 8 , A : Union[str, Any]=32 * 8 , A : Optional[int]=4 , A : Any=64 , ) -> str: lowercase_ : List[str] = parent lowercase_ : Any = batch_size lowercase_ : Union[str, Any] = is_training lowercase_ : Optional[Any] = use_auxiliary_loss lowercase_ : List[Any] = num_queries lowercase_ : Any = num_channels lowercase_ : Tuple = min_size lowercase_ : List[str] = max_size lowercase_ : str = num_labels lowercase_ : Tuple = hidden_dim lowercase_ : int = hidden_dim def A ( self : int ) -> Dict: lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( A ) lowercase_ : Dict = torch.ones([self.batch_size, self.min_size, self.max_size] , device=A ) lowercase_ : Tuple = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=A ) > 0.5 ).float() lowercase_ : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=A ) > 0.5).long() lowercase_ : List[str] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def A ( self : Optional[int] ) -> Optional[Any]: lowercase_ : Optional[Any] = MaskaFormerConfig( hidden_size=self.hidden_dim , ) lowercase_ : str = self.num_queries lowercase_ : Dict = self.num_labels lowercase_ : Union[str, Any] = [1, 1, 1, 1] lowercase_ : Tuple = self.num_channels lowercase_ : int = 64 lowercase_ : Optional[Any] = 1_28 lowercase_ : int = self.hidden_dim lowercase_ : List[Any] = self.hidden_dim lowercase_ : Union[str, Any] = self.hidden_dim return config def A ( self : Optional[int] ) -> Dict: lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = self.prepare_config_and_inputs() lowercase_ : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def A ( self : List[Any] , A : int , A : int ) -> Tuple: lowercase_ : List[str] = output.encoder_hidden_states lowercase_ : Tuple = output.pixel_decoder_hidden_states lowercase_ : int = output.transformer_decoder_hidden_states self.parent.assertTrue(len(A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A ) , config.decoder_layers ) def A ( self : Any , A : Optional[Any] , A : Optional[Any] , A : int , A : int=False ) -> Union[str, Any]: with torch.no_grad(): lowercase_ : List[Any] = MaskaFormerModel(config=A ) model.to(A ) model.eval() lowercase_ : Any = model(pixel_values=A , pixel_mask=A ) lowercase_ : Tuple = model(A , output_hidden_states=A ) self.parent.assertEqual( 
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(A , A ) def A ( self : Optional[int] , A : Union[str, Any] , A : Tuple , A : str , A : Optional[Any] , A : Tuple ) -> Optional[Any]: lowercase_ : Union[str, Any] = MaskaFormerForUniversalSegmentation(config=A ) model.to(A ) model.eval() def comm_check_on_output(A : List[Any] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowercase_ : Dict = model(pixel_values=A , pixel_mask=A ) lowercase_ : Dict = model(A ) comm_check_on_output(A ) lowercase_ : str = model( pixel_values=A , pixel_mask=A , mask_labels=A , class_labels=A ) comm_check_on_output(A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _UpperCAmelCase ( _A , _A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Tuple = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () SCREAMING_SNAKE_CASE_ : Optional[int] = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {} SCREAMING_SNAKE_CASE_ : Tuple = False SCREAMING_SNAKE_CASE_ : Union[str, Any] = False SCREAMING_SNAKE_CASE_ : Union[str, Any] = False SCREAMING_SNAKE_CASE_ : int = False def A ( self : Dict ) -> Dict: lowercase_ : Any = MaskaFormerModelTester(self ) lowercase_ : Union[str, Any] = ConfigTester(self , config_class=A , has_text_modality=A ) def A ( self : Union[str, Any] ) -> List[str]: self.config_tester.run_common_tests() def A ( self : Optional[Any] ) -> Optional[Any]: lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A , **A , output_hidden_states=A ) def A ( self : List[str] ) -> Any: lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A ) @unittest.skip(reason='''Mask2Former does not use inputs_embeds''' ) def A ( self : Dict ) -> List[Any]: pass @unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' ) def A ( self : Union[str, Any] ) -> Optional[Any]: pass @unittest.skip(reason='''Mask2Former is not a generative model''' ) def A ( self : int ) -> List[Any]: pass @unittest.skip(reason='''Mask2Former does not use token embeddings''' ) def A ( self : Tuple ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def A ( self : Dict ) -> List[str]: pass @unittest.skip('''Will be fixed soon by reducing the 
size of the model used for common tests.''' ) def A ( self : Tuple ) -> int: pass def A ( self : Optional[int] ) -> int: lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[str] = model_class(A ) lowercase_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Any = [*signature.parameters.keys()] lowercase_ : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A ) @slow def A ( self : Tuple ) -> Optional[Any]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: lowercase_ : Tuple = MaskaFormerModel.from_pretrained(A ) self.assertIsNotNone(A ) def A ( self : Tuple ) -> List[Any]: lowercase_ : Dict = (self.model_tester.min_size,) * 2 lowercase_ : Optional[Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=A ), '''mask_labels''': torch.randn((2, 10, *size) , device=A ), '''class_labels''': torch.zeros(2 , 10 , device=A ).long(), } lowercase_ : int = self.model_tester.get_config() lowercase_ : str = MaskaFormerForUniversalSegmentation(A ).to(A ) lowercase_ : Optional[int] = model(**A ) self.assertTrue(outputs.loss is not None ) def A ( self : str ) -> List[str]: lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A , **A , output_hidden_states=A ) def A ( self : List[Any] ) -> Optional[int]: lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[str] = model_class(A ).to(A ) lowercase_ : Any = model(**A , output_attentions=A ) self.assertTrue(outputs.attentions is not None ) def A ( self : int ) -> str: if not self.model_tester.is_training: return lowercase_ : Any = self.all_model_classes[1] lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() lowercase_ : Tuple = model_class(A ) model.to(A ) model.train() lowercase_ : Optional[int] = model(A , mask_labels=A , class_labels=A ).loss loss.backward() def A ( self : Any ) -> Union[str, Any]: lowercase_ : Optional[int] = self.all_model_classes[1] lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Tuple = True lowercase_ : List[str] = model_class(A ).to(A ) model.train() lowercase_ : Dict = model(A , mask_labels=A , class_labels=A ) lowercase_ : Optional[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowercase_ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() lowercase_ : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowercase_ : Union[str, Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A : Any = 1E-4 def lowercase ( ): lowercase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class _UpperCAmelCase ( unittest.TestCase ): @cached_property def A ( self : Optional[int] ) -> str: return 
"facebook/mask2former-swin-small-coco-instance" @cached_property def A ( self : Optional[int] ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def A ( self : Tuple ) -> Tuple: lowercase_ : Optional[Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A ) lowercase_ : Any = self.default_image_processor lowercase_ : int = prepare_img() lowercase_ : Optional[int] = image_processor(A , return_tensors='''pt''' ).to(A ) lowercase_ : Dict = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A , (1, 3, 3_84, 3_84) ) with torch.no_grad(): lowercase_ : Dict = model(**A ) lowercase_ : List[str] = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , A , atol=A ) ) lowercase_ : str = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , A , atol=A ) ) lowercase_ : Tuple = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , A , atol=A ) ) def A ( self : Union[str, Any] ) -> Dict: lowercase_ : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A ).eval() lowercase_ : Optional[int] = self.default_image_processor lowercase_ : List[str] = prepare_img() lowercase_ : List[Any] = image_processor(A , return_tensors='''pt''' ).to(A ) lowercase_ : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A , (1, 3, 3_84, 3_84) ) with torch.no_grad(): lowercase_ : List[Any] = model(**A ) # masks_queries_logits lowercase_ : Union[str, Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) lowercase_ : Optional[int] = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] lowercase_ : str = torch.tensor(A ).to(A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , A , atol=A ) ) # class_queries_logits lowercase_ : List[str] = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) lowercase_ : Optional[Any] = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ] ).to(A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , A , atol=A ) ) def A ( self : Union[str, Any] ) -> Tuple: lowercase_ : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A ).eval() lowercase_ : List[str] = self.default_image_processor lowercase_ : Union[str, Any] = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , ) lowercase_ : Optional[int] = inputs['''pixel_values'''].to(A ) lowercase_ : Union[str, Any] = [el.to(A ) for el in inputs['''mask_labels''']] 
lowercase_ : str = [el.to(A ) for el in inputs['''class_labels''']] with torch.no_grad(): lowercase_ : List[Any] = model(**A ) self.assertTrue(outputs.loss is not None )
33
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : int=3, _lowerCamelCase : Optional[Any]=18, _lowerCamelCase : Any=30, _lowerCamelCase : str=4_00, _lowerCamelCase : int=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=True, ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = apply_ocr def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = LayoutLMvaImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''apply_ocr''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} ) __A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) self.assertIsInstance(encoding.words, _lowerCamelCase ) self.assertIsInstance(encoding.boxes, _lowerCamelCase ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def 
_SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' # with apply_OCR = True __A = LayoutLMvaImageProcessor() from datasets import load_dataset __A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' ) __A = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ), len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', 
'''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 
4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, _lowerCamelCase ) self.assertListEqual(encoding.boxes, _lowerCamelCase ) # with apply_OCR = False __A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
266
0
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
34
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class snake_case ( ctypes.Structure ): '''simple docstring''' A_ : List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = False ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25l''' ) sys.stdout.flush() def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = True ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25h''' ) sys.stdout.flush() @contextmanager def lowerCAmelCase ( ): """simple docstring""" try: hide_cursor() yield finally: show_cursor()
266
0
'''simple docstring''' import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 16_000 ) -> Union[str, Any]: snake_case__ : int = int(round(sample_rate * max_length ) ) if len(_lowerCAmelCase ) <= sample_length: return wav snake_case__ : Optional[int] = randint(0 , len(_lowerCAmelCase ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class UpperCAmelCase_ : """simple docstring""" lowercase = field(default=_a , metadata={"help": "Name of a dataset from the datasets package"} ) lowercase = field( default=_a , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) lowercase = field( default=_a , metadata={"help": "A file containing the training audio paths and labels."} ) lowercase = field( default=_a , metadata={"help": "A file containing the validation audio paths and labels."} ) lowercase = field( default="train" , metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" } , ) lowercase = field( default="validation" , metadata={ "help": ( "The name of the training data set split to use (via the datasets library). Defaults to 'validation'" ) } , ) lowercase = field( default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , ) lowercase = field( default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} ) lowercase = field( default=_a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) lowercase = field( default=_a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) } , ) lowercase = field( default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , ) @dataclass class UpperCAmelCase_ : """simple docstring""" lowercase = field( default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , ) lowercase = field( default=_a , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) lowercase = field( default=_a , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} ) lowercase = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) lowercase = field( default=_a , metadata={"help": "Name or path of preprocessor config."} ) lowercase = field( default=_a , metadata={"help": "Whether to freeze the feature encoder layers of the model."} ) lowercase = field( default=_a , metadata={"help": "Whether to generate an attention mask in the feature extractor."} ) lowercase = field( default=_a , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) lowercase = field( default=_a , metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) lowercase = field( default=_a , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def lowerCamelCase ( self : str ): if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( """The argument `--freeze_feature_extractor` is deprecated and """ """will be removed in a future version. Use `--freeze_feature_encoder`""" """instead. Setting `freeze_feature_encoder==True`.""" , snake_case_ , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( """The argument `--freeze_feature_extractor` is deprecated and """ """should not be used in combination with `--freeze_feature_encoder`.""" """Only make use of `--freeze_feature_encoder`.""" ) def __snake_case( ) -> Dict: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case__ , snake_case__ , snake_case__ : Optional[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_audio_classification""" , _lowerCAmelCase , _lowerCAmelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() snake_case__ : Tuple = training_args.get_process_log_level() logger.setLevel(_lowerCAmelCase ) transformers.utils.logging.set_verbosity(_lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} " + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. snake_case__ : Optional[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: snake_case__ : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to train from scratch.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset and prepare it for the audio classification task. snake_case__ : Tuple = DatasetDict() snake_case__ : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) snake_case__ : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. " """Make sure to set `--audio_column_name` to the correct audio column - one of """ f"{', '.join(raw_datasets['train'].column_names )}." ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. " """Make sure to set `--label_column_name` to the correct text column - one of """ f"{', '.join(raw_datasets['train'].column_names )}." ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy snake_case__ : str = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
snake_case__ : Union[str, Any] = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) snake_case__ : Optional[Any] = feature_extractor.model_input_names[0] def train_transforms(_lowerCAmelCase ): snake_case__ : Optional[Any] = [] for audio in batch[data_args.audio_column_name]: snake_case__ : Optional[int] = random_subsample( audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_lowerCAmelCase ) snake_case__ : Optional[int] = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate ) snake_case__ : List[Any] = {model_input_name: inputs.get(_lowerCAmelCase )} snake_case__ : Dict = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_lowerCAmelCase ): snake_case__ : List[Any] = [audio["""array"""] for audio in batch[data_args.audio_column_name]] snake_case__ : Optional[int] = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate ) snake_case__ : int = {model_input_name: inputs.get(_lowerCAmelCase )} snake_case__ : Any = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. snake_case__ : Any = raw_datasets["""train"""].features[data_args.label_column_name].names snake_case__ , snake_case__ : Any = {}, {} for i, label in enumerate(_lowerCAmelCase ): snake_case__ : Tuple = str(_lowerCAmelCase ) snake_case__ : Optional[Any] = label # Load the accuracy metric from the datasets package snake_case__ : Any = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(_lowerCAmelCase ): snake_case__ : Tuple = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_lowerCAmelCase , references=eval_pred.label_ids ) snake_case__ : Dict = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCAmelCase ) , labelaid=_lowerCAmelCase , idalabel=_lowerCAmelCase , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) snake_case__ : Dict = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: snake_case__ : int = ( raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase ) if training_args.do_eval: if data_args.max_eval_samples is not None: snake_case__ : Optional[int] = ( raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase ) # Initialize our trainer snake_case__ : List[Any] = Trainer( model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , ) # Training if training_args.do_train: snake_case__ : Any = None if training_args.resume_from_checkpoint is not None: snake_case__ : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: snake_case__ : List[str] = last_checkpoint snake_case__ : Tuple = trainer.train(resume_from_checkpoint=_lowerCAmelCase ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: snake_case__ : Union[str, Any] = trainer.evaluate() trainer.log_metrics("""eval""" , _lowerCAmelCase ) trainer.save_metrics("""eval""" , _lowerCAmelCase ) # Write model card and (optionally) push to hub snake_case__ : List[str] = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """audio-classification""", """dataset""": data_args.dataset_name, """tags""": ["""audio-classification"""], } if training_args.push_to_hub: trainer.push_to_hub(**_lowerCAmelCase ) else: trainer.create_model_card(**_lowerCAmelCase ) if __name__ == "__main__": main()
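# --- Illustrative sketch (not part of the dataset row above) ---
# The training transform in the script above crops each waveform with `random_subsample`
# (defined near the top of the script) so every training clip is at most
# `max_length_seconds` long. The standalone re-implementation below mirrors that helper
# for illustration only.
import numpy as np
from random import randint


def random_subsample(wav, max_length, sample_rate=16_000):
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav  # short clips are kept as-is
    offset = randint(0, len(wav) - sample_length - 1)
    return wav[offset : offset + sample_length]


if __name__ == "__main__":
    ten_seconds = np.zeros(10 * 16_000, dtype=np.float32)
    cropped = random_subsample(ten_seconds, max_length=2.0)
    assert len(cropped) == 2 * 16_000  # a random 2-second window at 16 kHz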
35
"""simple docstring""" import argparse import struct import unittest class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : bytes ): '''simple docstring''' __A = data # Initialize hash values __A = [ 0X6a_09e_667, 0Xbb_67a_e85, 0X3c_6ef_372, 0Xa5_4ff_53a, 0X51_0e5_27f, 0X9b_056_88c, 0X1f_83d_9ab, 0X5b_e0c_d19, ] # Initialize round constants __A = [ 0X42_8a2_f98, 0X71_374_491, 0Xb5_c0f_bcf, 0Xe9_b5d_ba5, 0X39_56c_25b, 0X59_f11_1f1, 0X92_3f8_2a4, 0Xab_1c5_ed5, 0Xd8_07a_a98, 0X12_835_b01, 0X24_318_5be, 0X55_0c7_dc3, 0X72_be5_d74, 0X80_deb_1fe, 0X9b_dc0_6a7, 0Xc1_9bf_174, 0Xe4_9b6_9c1, 0Xef_be4_786, 0X0f_c19_dc6, 0X24_0ca_1cc, 0X2d_e92_c6f, 0X4a_748_4aa, 0X5c_b0a_9dc, 0X76_f98_8da, 0X98_3e5_152, 0Xa8_31c_66d, 0Xb0_032_7c8, 0Xbf_597_fc7, 0Xc6_e00_bf3, 0Xd5_a79_147, 0X06_ca6_351, 0X14_292_967, 0X27_b70_a85, 0X2e_1b2_138, 0X4d_2c6_dfc, 0X53_380_d13, 0X65_0a7_354, 0X76_6a0_abb, 0X81_c2c_92e, 0X92_722_c85, 0Xa2_bfe_8a1, 0Xa8_1a6_64b, 0Xc2_4b8_b70, 0Xc7_6c5_1a3, 0Xd1_92e_819, 0Xd6_990_624, 0Xf4_0e3_585, 0X10_6aa_070, 0X19_a4c_116, 0X1e_376_c08, 0X27_487_74c, 0X34_b0b_cb5, 0X39_1c0_cb3, 0X4e_d8a_a4a, 0X5b_9cc_a4f, 0X68_2e6_ff3, 0X74_8f8_2ee, 0X78_a56_36f, 0X84_c87_814, 0X8c_c70_208, 0X90_bef_ffa, 0Xa4_506_ceb, 0Xbe_f9a_3f7, 0Xc6_717_8f2, ] __A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : bytes ): '''simple docstring''' __A = b'''\x80''' + (b'''\x00''' * (63 - (len(_lowerCamelCase ) + 8) % 64)) __A = struct.pack('''>Q''', (len(_lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' # Convert into blocks of 64 bytes __A = [ self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data ), 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __A = list(struct.unpack('''>16L''', _lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __A , __A , __A , __A , __A , __A , __A , __A = self.hashes for index in range(0, 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __A = ( self.ror(words[index - 15], 7 ) ^ self.ror(words[index - 15], 18 ) ^ (words[index - 15] >> 3) ) __A = ( self.ror(words[index - 2], 17 ) ^ self.ror(words[index - 2], 19 ) ^ (words[index - 2] >> 10) ) __A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression __A = self.ror(_lowerCamelCase, 6 ) ^ self.ror(_lowerCamelCase, 11 ) ^ self.ror(_lowerCamelCase, 25 ) __A = (e & f) ^ ((~e & 0Xff_fff_fff) & g) __A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 __A = self.ror(_lowerCamelCase, 2 ) ^ self.ror(_lowerCamelCase, 13 ) ^ self.ror(_lowerCamelCase, 22 ) __A = (a & b) ^ (a & c) ^ (b & c) __A = (sa + maj) % 0X100_000_000 __A , __A , __A , __A , __A , __A , __A , __A = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) __A = [a, b, c, d, e, f, g, h] # Modify final values __A = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes ) ] __A = ''''''.join([hex(_lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' return 0Xff_fff_fff & (value << (32 - rotations)) | (value >> rotations) class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' import 
hashlib __A = bytes('''Test String''', '''utf-8''' ) self.assertEqual(SHAaaa(_lowerCamelCase ).hash, hashlib.shaaaa(_lowerCamelCase ).hexdigest() ) def lowerCAmelCase ( ): """simple docstring""" import doctest doctest.testmod() __A = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) __A = parser.parse_args() __A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: __A = f.read() else: __A = bytes(__UpperCamelCase , '''utf-8''' ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
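# --- Illustrative sanity check (not part of the dataset row above) ---
# Two small checks for the SHA-256 class above (spelled `SHAaaa` in this corpus): the
# 32-bit rotate-right primitive its compression loop relies on, and the hashlib
# comparison its unit test performs, shown here in plain form.
import hashlib


def ror(value: int, rotations: int) -> int:
    # 32-bit rotate right, the same operation the class implements
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))


assert ror(0x00000001, 1) == 0x80000000

if __name__ == "__main__":
    msg = b"Test String"
    # the hand-rolled class is expected to reproduce hashlib's hex digest for the same input
    print(hashlib.sha256(msg).hexdigest())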
266
0
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 _snake_case = sys.version_info >= (3, 10) def A ( _lowerCamelCase=None , _lowerCamelCase=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_lowerCamelCase ) @dataclass class UpperCAmelCase_ : lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 @dataclass class UpperCAmelCase_ : lowerCamelCase__ = 42 lowerCamelCase__ = field(default='toto' , metadata={'help': 'help message'}) @dataclass class UpperCAmelCase_ : lowerCamelCase__ = False lowerCamelCase__ = True lowerCamelCase__ = None class UpperCAmelCase_ ( a): lowerCamelCase__ = 'titi' lowerCamelCase__ = 'toto' class UpperCAmelCase_ ( a): lowerCamelCase__ = 'titi' lowerCamelCase__ = 'toto' lowerCamelCase__ = 42 @dataclass class UpperCAmelCase_ : lowerCamelCase__ = "toto" def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = BasicEnum(self.foo) @dataclass class UpperCAmelCase_ : lowerCamelCase__ = "toto" def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Tuple = MixedTypeEnum(self.foo) @dataclass class UpperCAmelCase_ : lowerCamelCase__ = None lowerCamelCase__ = field(default=a , metadata={'help': 'help message'}) lowerCamelCase__ = None lowerCamelCase__ = list_field(default=[]) lowerCamelCase__ = list_field(default=[]) @dataclass class UpperCAmelCase_ : lowerCamelCase__ = list_field(default=[]) lowerCamelCase__ = list_field(default=[1, 2, 3]) lowerCamelCase__ = list_field(default=['Hallo', 'Bonjour', 'Hello']) lowerCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class UpperCAmelCase_ : lowerCamelCase__ = field() lowerCamelCase__ = field() lowerCamelCase__ = field() def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = BasicEnum(self.required_enum) @dataclass class UpperCAmelCase_ : lowerCamelCase__ = 42 lowerCamelCase__ = field() lowerCamelCase__ = None lowerCamelCase__ = field(default='toto' , metadata={'help': 'help message'}) lowerCamelCase__ = list_field(default=['Hallo', 'Bonjour', 'Hello']) if is_python_no_less_than_3_10: @dataclass class UpperCAmelCase_ : lowerCamelCase__ = False lowerCamelCase__ = True lowerCamelCase__ = None @dataclass class UpperCAmelCase_ : lowerCamelCase__ = None lowerCamelCase__ = field(default=a , metadata={'help': 'help message'}) lowerCamelCase__ = None lowerCamelCase__ = list_field(default=[]) lowerCamelCase__ = list_field(default=[]) class UpperCAmelCase_ ( unittest.TestCase): def snake_case__ ( self, __a, __a): '''simple docstring''' self.assertEqual(len(a._actions), len(b._actions)) for x, y in zip(a._actions, b._actions): _lowerCAmelCase : int = {k: v for k, v in vars(__a).items() if k != "container"} _lowerCAmelCase : Dict = {k: v for k, v in vars(__a).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("choices", __a) and yy.get("choices", __a): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["type"](__a), 
yy["type"](__a)) del xx["type"], yy["type"] self.assertEqual(__a, __a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : List[str] = HfArgumentParser(__a) _lowerCAmelCase : List[Any] = argparse.ArgumentParser() expected.add_argument("--foo", type=__a, required=__a) expected.add_argument("--bar", type=__a, required=__a) expected.add_argument("--baz", type=__a, required=__a) expected.add_argument("--flag", type=__a, default=__a, const=__a, nargs="?") self.argparsersEqual(__a, __a) _lowerCAmelCase : int = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] ((_lowerCAmelCase) , ) : str = parser.parse_args_into_dataclasses(__a, look_for_args_file=__a) self.assertFalse(example.flag) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = HfArgumentParser(__a) _lowerCAmelCase : Any = argparse.ArgumentParser() expected.add_argument("--foo", default=42, type=__a) expected.add_argument("--baz", default="toto", type=__a, help="help message") self.argparsersEqual(__a, __a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : List[str] = argparse.ArgumentParser() expected.add_argument("--foo", type=__a, default=__a, const=__a, nargs="?") expected.add_argument("--baz", type=__a, default=__a, const=__a, nargs="?") # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("--no_baz", action="store_false", default=__a, dest="baz") expected.add_argument("--opt", type=__a, default=__a) _lowerCAmelCase : List[str] = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__a) for dataclass_type in dataclass_types: _lowerCAmelCase : Any = HfArgumentParser(__a) self.argparsersEqual(__a, __a) _lowerCAmelCase : int = parser.parse_args([]) self.assertEqual(__a, Namespace(foo=__a, baz=__a, opt=__a)) _lowerCAmelCase : int = parser.parse_args(["--foo", "--no_baz"]) self.assertEqual(__a, Namespace(foo=__a, baz=__a, opt=__a)) _lowerCAmelCase : Optional[int] = parser.parse_args(["--foo", "--baz"]) self.assertEqual(__a, Namespace(foo=__a, baz=__a, opt=__a)) _lowerCAmelCase : Dict = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"]) self.assertEqual(__a, Namespace(foo=__a, baz=__a, opt=__a)) _lowerCAmelCase : str = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"]) self.assertEqual(__a, Namespace(foo=__a, baz=__a, opt=__a)) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = HfArgumentParser(__a) _lowerCAmelCase : str = argparse.ArgumentParser() expected.add_argument( "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]), ) self.argparsersEqual(__a, __a) _lowerCAmelCase : str = parser.parse_args([]) self.assertEqual(args.foo, "toto") _lowerCAmelCase : Any = parser.parse_args_into_dataclasses([])[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.toto) _lowerCAmelCase : Optional[int] = parser.parse_args(["--foo", "titi"]) self.assertEqual(args.foo, "titi") _lowerCAmelCase : List[Any] = parser.parse_args_into_dataclasses(["--foo", "titi"])[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.titi) _lowerCAmelCase : Union[str, Any] = parser.parse_args(["--foo", "42"]) self.assertEqual(args.foo, 42) _lowerCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses(["--foo", "42"])[0] self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo) def snake_case__ ( self): '''simple docstring''' @dataclass class UpperCAmelCase_ : lowerCamelCase__ 
= "toto" _lowerCAmelCase : Tuple = HfArgumentParser(__a) _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() expected.add_argument( "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]), ) self.argparsersEqual(__a, __a) _lowerCAmelCase : Any = parser.parse_args([]) self.assertEqual(args.foo, "toto") _lowerCAmelCase : Optional[Any] = parser.parse_args(["--foo", "titi"]) self.assertEqual(args.foo, "titi") _lowerCAmelCase : Optional[int] = parser.parse_args(["--foo", "42"]) self.assertEqual(args.foo, 42) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : List[str] = HfArgumentParser(__a) _lowerCAmelCase : Any = argparse.ArgumentParser() expected.add_argument("--foo_int", nargs="+", default=[], type=__a) expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=__a) expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=__a) expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=__a) self.argparsersEqual(__a, __a) _lowerCAmelCase : Optional[Any] = parser.parse_args([]) self.assertEqual( __a, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]), ) _lowerCAmelCase : Optional[Any] = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split()) self.assertEqual(__a, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7])) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = argparse.ArgumentParser() expected.add_argument("--foo", default=__a, type=__a) expected.add_argument("--bar", default=__a, type=__a, help="help message") expected.add_argument("--baz", default=__a, type=__a) expected.add_argument("--ces", nargs="+", default=[], type=__a) expected.add_argument("--des", nargs="+", default=[], type=__a) _lowerCAmelCase : Optional[Any] = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__a) for dataclass_type in dataclass_types: _lowerCAmelCase : Tuple = HfArgumentParser(__a) self.argparsersEqual(__a, __a) _lowerCAmelCase : int = parser.parse_args([]) self.assertEqual(__a, Namespace(foo=__a, bar=__a, baz=__a, ces=[], des=[])) _lowerCAmelCase : List[Any] = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split()) self.assertEqual(__a, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3])) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = HfArgumentParser(__a) _lowerCAmelCase : Any = argparse.ArgumentParser() expected.add_argument("--required_list", nargs="+", type=__a, required=__a) expected.add_argument("--required_str", type=__a, required=__a) expected.add_argument( "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=__a, ) self.argparsersEqual(__a, __a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Any = HfArgumentParser(__a) _lowerCAmelCase : Optional[int] = argparse.ArgumentParser() expected.add_argument("--foo", type=__a, required=__a) expected.add_argument( "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=__a, ) expected.add_argument("--opt", type=__a, default=__a) expected.add_argument("--baz", default="toto", type=__a, help="help message") expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=__a) self.argparsersEqual(__a, __a) def snake_case__ ( 
self): '''simple docstring''' _lowerCAmelCase : Dict = HfArgumentParser(__a) _lowerCAmelCase : Tuple = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } _lowerCAmelCase : List[str] = parser.parse_dict(__a)[0] _lowerCAmelCase : Optional[int] = BasicExample(**__a) self.assertEqual(__a, __a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : str = HfArgumentParser(__a) _lowerCAmelCase : Optional[int] = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, "extra": 42, } self.assertRaises(__a, parser.parse_dict, __a, allow_extra_keys=__a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Tuple = HfArgumentParser(__a) _lowerCAmelCase : Tuple = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase : Dict = os.path.join(__a, "temp_json") os.mkdir(__a) with open(temp_local_path + ".json", "w+") as f: json.dump(__a, __a) _lowerCAmelCase : Optional[int] = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0] _lowerCAmelCase : str = BasicExample(**__a) self.assertEqual(__a, __a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : str = HfArgumentParser(__a) _lowerCAmelCase : int = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: _lowerCAmelCase : Any = os.path.join(__a, "temp_yaml") os.mkdir(__a) with open(temp_local_path + ".yaml", "w+") as f: yaml.dump(__a, __a) _lowerCAmelCase : str = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0] _lowerCAmelCase : Optional[int] = BasicExample(**__a) self.assertEqual(__a, __a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : List[str] = HfArgumentParser(__a) self.assertIsNotNone(__a)
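# --- Illustrative sketch (not part of the dataset row above) ---
# The tests above all exercise the same pattern: a dataclass declares the CLI surface and
# HfArgumentParser maps it onto argparse, then parses argv back into dataclass instances.
# The fields below loosely mirror the BasicExample dataclass from the tests, with defaults
# added so the sketch also runs without arguments.
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class ExampleArguments:
    foo: int = 42
    bar: float = 3.14
    baz: Optional[str] = field(default="toto", metadata={"help": "help message"})


if __name__ == "__main__":
    parser = HfArgumentParser(ExampleArguments)
    (args,) = parser.parse_args_into_dataclasses(["--foo", "1", "--bar", "0.5"])
    assert args.foo == 1 and args.bar == 0.5 and args.baz == "toto"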
36
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets lowercase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' lowercase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' lowercase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ], ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str="auto", _lowerCamelCase : Union[str, Any]=-1, _lowerCamelCase : List[str]=0.9, _lowerCamelCase : int=5, _lowerCamelCase : Tuple=5_00, _lowerCamelCase : Union[str, Any]="gpt2-large", _lowerCamelCase : int=-1, _lowerCamelCase : Union[str, Any]=10_24, _lowerCamelCase : Union[str, Any]=25, _lowerCamelCase : str=5, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=25, ): '''simple docstring''' __A = compute_mauve( p_text=_lowerCamelCase, q_text=_lowerCamelCase, p_features=_lowerCamelCase, q_features=_lowerCamelCase, p_tokens=_lowerCamelCase, q_tokens=_lowerCamelCase, num_buckets=_lowerCamelCase, pca_max_data=_lowerCamelCase, kmeans_explained_var=_lowerCamelCase, kmeans_num_redo=_lowerCamelCase, kmeans_max_iter=_lowerCamelCase, featurize_model_name=_lowerCamelCase, device_id=_lowerCamelCase, max_text_length=_lowerCamelCase, divergence_curve_discretization_size=_lowerCamelCase, mauve_scaling_factor=_lowerCamelCase, verbose=_lowerCamelCase, seed=_lowerCamelCase, ) return out
266
0
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip _lowerCAmelCase = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" return max(metric_fn(UpperCamelCase , UpperCamelCase ) for gt in ground_truths ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : int = [line.strip() for line in open(UpperCamelCase , """r""" ).readlines()] lowerCAmelCase__ : Union[str, Any] = [] if args.gold_data_mode == "qa": lowerCAmelCase__ : Tuple = pd.read_csv(UpperCamelCase , sep="""\t""" , header=UpperCamelCase ) for answer_list in data[1]: lowerCAmelCase__ : Optional[Any] = ast.literal_eval(UpperCamelCase ) answers.append(UpperCamelCase ) else: lowerCAmelCase__ : str = [line.strip() for line in open(UpperCamelCase , """r""" ).readlines()] lowerCAmelCase__ : Tuple = [[reference] for reference in references] lowerCAmelCase__ : Optional[Any] = 0 for prediction, ground_truths in zip(UpperCamelCase , UpperCamelCase ): total += 1 em += metric_max_over_ground_truths(UpperCamelCase , UpperCamelCase , UpperCamelCase ) fa += metric_max_over_ground_truths(UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : str = 100.0 * em / total lowerCAmelCase__ : Optional[int] = 100.0 * fa / total logger.info(f"""F1: {fa:.2f}""" ) logger.info(f"""EM: {em:.2f}""" ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : str = args.k lowerCAmelCase__ : Optional[int] = [line.strip() for line in open(UpperCamelCase , """r""" ).readlines()] lowerCAmelCase__ : Optional[int] = [line.strip() for line in open(UpperCamelCase , """r""" ).readlines()] lowerCAmelCase__ : str = 0 for hypo, reference in zip(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Union[str, Any] = set(hypo.split("""\t""" )[:k] ) lowerCAmelCase__ : List[Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k lowerCAmelCase__ : Optional[int] = 100.0 * em / total logger.info(f"""Precision@{k}: {em: .2f}""" ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" def strip_title(UpperCamelCase ): if title.startswith("""\"""" ): lowerCAmelCase__ : Optional[Any] = title[1:] if title.endswith("""\"""" ): lowerCAmelCase__ : int = title[:-1] return title lowerCAmelCase__ : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( UpperCamelCase , return_tensors="""pt""" , padding=UpperCamelCase , truncation=UpperCamelCase , )["""input_ids"""].to(args.device ) lowerCAmelCase__ : Optional[int] = rag_model.rag.question_encoder(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = question_enc_outputs[0] lowerCAmelCase__ : 
List[str] = rag_model.retriever( UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) lowerCAmelCase__ : List[str] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) lowerCAmelCase__ : List[Any] = [] for docs in all_docs: lowerCAmelCase__ : Tuple = [strip_title(UpperCamelCase ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(UpperCamelCase ) ) return provenance_strings def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" with torch.no_grad(): lowerCAmelCase__ : str = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( UpperCamelCase , return_tensors="""pt""" , padding=UpperCamelCase , truncation=UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = inputs_dict.input_ids.to(args.device ) lowerCAmelCase__ : List[Any] = inputs_dict.attention_mask.to(args.device ) lowerCAmelCase__ : Optional[Any] = rag_model.generate( # rag_model overwrites generate UpperCamelCase , attention_mask=UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) lowerCAmelCase__ : Optional[Any] = rag_model.retriever.generator_tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) if args.print_predictions: for q, a in zip(UpperCamelCase , UpperCamelCase ): logger.info("""Q: {} - A: {}""".format(UpperCamelCase , UpperCamelCase ) ) return answers def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Tuple = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=UpperCamelCase , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=UpperCamelCase , choices=["""exact""", """compressed""", """legacy"""] , type=UpperCamelCase , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=UpperCamelCase , type=UpperCamelCase , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=UpperCamelCase , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=UpperCamelCase , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=UpperCamelCase , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=UpperCamelCase , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in 
the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=UpperCamelCase , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=UpperCamelCase , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=UpperCamelCase , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=UpperCamelCase , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=UpperCamelCase , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) lowerCAmelCase__ : Dict = parser.parse_args() lowerCAmelCase__ : Tuple = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[str] = {} if args.model_type is None: lowerCAmelCase__ : Any = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): lowerCAmelCase__ : str = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration lowerCAmelCase__ : Any = args.n_docs if args.index_name is not None: lowerCAmelCase__ : Any = args.index_name if args.index_path is not None: lowerCAmelCase__ : Dict = args.index_path else: lowerCAmelCase__ : Tuple = BartForConditionalGeneration lowerCAmelCase__ : List[Any] = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , UpperCamelCase ) lowerCAmelCase__ : Optional[int] = get_scores if args.eval_mode == """e2e""" else get_precision_at_k lowerCAmelCase__ : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(UpperCamelCase , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(UpperCamelCase ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): lowerCAmelCase__ : str = RagRetriever.from_pretrained(UpperCamelCase , **UpperCamelCase ) lowerCAmelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase , retriever=UpperCamelCase , **UpperCamelCase ) model.retriever.init_retrieval() else: lowerCAmelCase__ : Dict = 
model_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: lowerCAmelCase__ : int = [] for line in tqdm(UpperCamelCase ): questions.append(line.strip() ) if len(UpperCamelCase ) == args.eval_batch_size: lowerCAmelCase__ : Tuple = evaluate_batch_fn(UpperCamelCase , UpperCamelCase , UpperCamelCase ) preds_file.write("""\n""".join(UpperCamelCase ) + """\n""" ) preds_file.flush() lowerCAmelCase__ : Optional[int] = [] if len(UpperCamelCase ) > 0: lowerCAmelCase__ : Tuple = evaluate_batch_fn(UpperCamelCase , UpperCamelCase , UpperCamelCase ) preds_file.write("""\n""".join(UpperCamelCase ) ) preds_file.flush() score_fn(UpperCamelCase , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": _lowerCAmelCase = get_args() main(args)
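# --- Illustrative sketch (not part of the dataset row above) ---
# The e2e scoring path in the script above relies on `metric_max_over_ground_truths`: each
# prediction is scored against every gold answer and the best value is kept. The toy metric
# below is a simplified stand-in for the exact_match_score imported from utils_rag.
def exact_match(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


assert metric_max_over_ground_truths(exact_match, "Paris", ["paris", "Lyon"]) == 1.0
assert metric_max_over_ground_truths(exact_match, "Rome", ["paris", "Lyon"]) == 0.0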
37
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowercase_ = imread(R'digital_image_processing/image_data/lena_small.jpg') lowercase_ = cvtColor(img, COLOR_BGR2GRAY) def lowerCAmelCase ( ): """simple docstring""" __A = cn.convert_to_negative(__UpperCamelCase ) # assert negative_img array for at least one True assert negative_img.any() def lowerCAmelCase ( ): """simple docstring""" with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(__UpperCamelCase , 1_1_0 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def lowerCAmelCase ( ): """simple docstring""" __A = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCAmelCase ( ): """simple docstring""" __A = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() __A = canny.canny(__UpperCamelCase ) # assert canny array for at least one True assert canny_array.any() def lowerCAmelCase ( ): """simple docstring""" assert gg.gaussian_filter(__UpperCamelCase , 5 , sigma=0.9 ).all() def lowerCAmelCase ( ): """simple docstring""" __A = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __A = conv.img_convolve(__UpperCamelCase , __UpperCamelCase ).astype(__UpperCamelCase ) assert res.any() def lowerCAmelCase ( ): """simple docstring""" assert med.median_filter(__UpperCamelCase , 3 ).any() def lowerCAmelCase ( ): """simple docstring""" __A , __A = sob.sobel_filter(__UpperCamelCase ) assert grad.any() and theta.any() def lowerCAmelCase ( ): """simple docstring""" __A = sp.make_sepia(__UpperCamelCase , 2_0 ) assert sepia.all() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ): """simple docstring""" __A = bs.Burkes(imread(__UpperCamelCase , 1 ) , 1_2_0 ) burkes.process() assert burkes.output_img.any() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ): """simple docstring""" __A = rs.NearestNeighbour(imread(__UpperCamelCase , 1 ) , 4_0_0 , 2_0_0 ) nn.process() assert nn.output.any() def lowerCAmelCase ( ): """simple docstring""" __A = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. 
__A = imread(__UpperCamelCase , 0 ) # Test for get_neighbors_pixel function() return not None __A = 0 __A = 0 __A = image[x_coordinate][y_coordinate] __A = lbp.get_neighbors_pixel( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __A = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __A = lbp.local_binary_value(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert lbp_image.any()
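# --- Illustrative sketch (not part of the dataset row above) ---
# One of the operations tested above, convert_to_negative, inverts an image so that each
# channel value v becomes 255 - v. The one-liner below mirrors that idea for illustration;
# it is not the library's exact implementation.
import numpy as np


def convert_to_negative(img: np.ndarray) -> np.ndarray:
    return 255 - img


if __name__ == "__main__":
    sample = np.array([[0, 128, 255]], dtype=np.uint8)
    print(convert_to_negative(sample))  # [[255 127   0]]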
266
0
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ : List[str] = '''\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation", author = "Lin, Chin-Yew and Och, Franz Josef", booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics", month = "aug 23{--}aug 27", year = "2004", address = "Geneva, Switzerland", publisher = "COLING", url = "https://www.aclweb.org/anthology/C04-1072", pages = "501--507", } ''' UpperCAmelCase_ : Optional[Any] = '''\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation, the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. ''' UpperCAmelCase_ : Union[str, Any] = ''' Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: \'bleu\': bleu score, \'precisions\': geometric mean of n-gram precisions, \'brevity_penalty\': brevity penalty, \'length_ratio\': ratio of lengths, \'translation_length\': translation_length, \'reference_length\': reference_length Examples: >>> predictions = [ ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample ... ] >>> references = [ ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references) ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric("bleu") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results["bleu"]) 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _SCREAMING_SNAKE_CASE ( datasets.Metric ): def _A ( self : int ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _A ( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Optional[Any]=False ): UpperCamelCase :List[Any] = compute_bleu( reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) :List[Any] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
38
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowercase_ = random.Random() if is_torch_available(): import torch def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None ): """simple docstring""" if rng is None: __A = global_rng __A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any, _lowerCamelCase : List[str], _lowerCamelCase : Any=7, _lowerCamelCase : Optional[int]=4_00, _lowerCamelCase : Optional[int]=20_00, _lowerCamelCase : Dict=1, _lowerCamelCase : Optional[Any]=0.0, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Dict=True, ): '''simple docstring''' __A = parent __A = batch_size __A = min_seq_length __A = max_seq_length __A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __A = feature_size __A = padding_value __A = sampling_rate __A = return_attention_mask __A = do_normalize def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[Any]=False, _lowerCamelCase : int=False ): '''simple docstring''' def _flatten(_lowerCamelCase : List[str] ): return list(itertools.chain(*_lowerCamelCase ) ) if equal_length: __A = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __A = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: __A = [np.asarray(_lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = ASTFeatureExtractor def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ASTFeatureExtractionTester(self ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' # Tests that all call wrap to encode_plus and batch_encode_plus __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __A = [floats_list((1, x) )[0] for x in range(8_00, 14_00, 2_00 )] __A = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __A = feat_extract(speech_inputs[0], return_tensors='''np''' ).input_values __A = feat_extract(np_speech_inputs[0], return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test batched __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test 
2-D numpy arrays are batched. __A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __A = np.asarray(_lowerCamelCase ) __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' import torch __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __A = np.random.rand(1_00 ).astype(np.floataa ) __A = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' from datasets import load_dataset __A = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech __A = ds.sort('''id''' ).select(range(_lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # fmt: off __A = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on __A = self._load_datasamples(1 ) __A = ASTFeatureExtractor() __A = feature_extractor(_lowerCamelCase, return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape, (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30], _lowerCamelCase, atol=1e-4 ) )
266
0
from __future__ import annotations class __lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = text, pattern _UpperCAmelCase , _UpperCAmelCase = len(UpperCAmelCase ), len(UpperCAmelCase ) def UpperCamelCase ( self , UpperCAmelCase ): """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def UpperCamelCase ( self , UpperCAmelCase ): """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = [] for i in range(self.textLen - self.patLen + 1 ): _UpperCAmelCase = self.mismatch_in_text(UpperCAmelCase ) if mismatch_index == -1: positions.append(UpperCAmelCase ) else: _UpperCAmelCase = self.match_in_pattern(self.text[mismatch_index] ) _UpperCAmelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _a = '''ABAABA''' _a = '''AB''' _a = BoyerMooreSearch(text, pattern) _a = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
39
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = current_set.copy() for row_index, row in enumerate(__UpperCamelCase ): __A = row[0] for column_index, column in enumerate(__UpperCamelCase ): if magnitude == 0: __A = column continue __A = column / magnitude # Subtract to cancel term __A = current_set[0] __A = [first_row] __A = current_set[1::] for row in current_set: __A = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__UpperCamelCase ) continue for column_index in range(len(__UpperCamelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__UpperCamelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: __A = final_set[0] __A = [] __A = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) __A = simplify(__UpperCamelCase ) for i in range(len(__UpperCamelCase ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , __UpperCamelCase ) __A = resultant return final_set def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if len(__UpperCamelCase ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) __A = len(__UpperCamelCase ) + 1 if any(len(__UpperCamelCase ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(__UpperCamelCase , (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(__UpperCamelCase ) == 1: return [equations[0][-1] / equations[0][0]] __A = equations.copy() if any(0 in row for row in data_set ): __A = data_set.copy() __A = [] for row_index, row in enumerate(__UpperCamelCase ): if 0 not in row: __A = data_set.pop(__UpperCamelCase ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0 , __UpperCamelCase ) __A = data_set.copy() __A = simplify(__UpperCamelCase ) __A = simplified[::-1] __A = [] for row in simplified: __A = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue __A = row.copy()[: len(__UpperCamelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__UpperCamelCase ) == 0: solutions.append(0 ) continue __A = temp_row[1::] __A = temp_row[::-1] for column_index, column in enumerate(__UpperCamelCase ): current_solution -= column * solutions[column_index] solutions.append(__UpperCamelCase ) __A = [] for item in solutions: final.append(float(round(__UpperCamelCase , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowercase_ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
266
0
"""simple docstring""" import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup __lowercase = { """User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36""" """ (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""" } def lowercase ( A_ = "dhaka" , A_ = 5 )-> int: '''simple docstring''' a : str = min(A_ , 50 ) # Prevent abuse! a : Optional[Any] = { "q": query, "tbm": "isch", "hl": "en", "ijn": "0", } a : Tuple = requests.get("https://www.google.com/search" , params=A_ , headers=A_ ) a : Union[str, Any] = BeautifulSoup(html.text , "html.parser" ) a : Union[str, Any] = "".join( re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) ) a : Any = json.dumps(A_ ) a : Tuple = json.loads(A_ ) a : List[str] = re.findall( R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , A_ , ) if not matched_google_image_data: return 0 a : Optional[Any] = re.sub( R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(A_ ) , ) a : Any = re.findall( R"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , A_ , ) for index, fixed_full_res_image in enumerate(A_ ): if index >= max_images: return index a : Optional[Any] = bytes(A_ , "ascii" ).decode( "unicode-escape" ) a : int = bytes(A_ , "ascii" ).decode( "unicode-escape" ) a : Any = urllib.request.build_opener() a : Dict = [ ( "User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582", ) ] urllib.request.install_opener(A_ ) a : List[Any] = F'''query_{query.replace(" " , "_" )}''' if not os.path.exists(A_ ): os.makedirs(A_ ) urllib.request.urlretrieve( # noqa: S310 A_ , F'''{path_name}/original_size_img_{index}.jpg''' ) return index if __name__ == "__main__": try: __lowercase = download_images_from_google_query(sys.argv[1]) print(f'''{image_count} images were downloaded to disk.''') except IndexError: print("""Please provide a search term.""") raise
40
"""simple docstring""" from __future__ import annotations from typing import Any def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not postfix_notation: return 0 __A = {'''+''', '''-''', '''*''', '''/'''} __A = [] for token in postfix_notation: if token in operations: __A , __A = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(__UpperCamelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
266
0