code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = '▁' lowercase_ = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} lowercase_ = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } lowercase_ = {'vinai/bartpho-syllable': 1024} class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : str = VOCAB_FILES_NAMES A_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP A_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Tuple = ["input_ids", "attention_mask"] def __init__( self : List[str], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[Any], _lowerCamelCase : List[str]="<s>", _lowerCamelCase : Union[str, Any]="</s>", _lowerCamelCase : Any="</s>", _lowerCamelCase : Dict="<s>", _lowerCamelCase : Dict="<unk>", _lowerCamelCase : List[Any]="<pad>", _lowerCamelCase : Tuple="<mask>", _lowerCamelCase : Optional[Dict[str, Any]] = None, **_lowerCamelCase : List[Any], ): '''simple docstring''' # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token __A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **_lowerCamelCase, ) __A = vocab_file __A = monolingual_vocab_file __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCamelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility __A = {} __A = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(_lowerCamelCase ) not in self.fairseq_tokens_to_ids: __A = cnt cnt += 1 with open(_lowerCamelCase, '''r''', encoding='''utf-8''' ) as f: for line in f.readlines(): __A = line.strip().split()[0] __A = len(self.fairseq_tokens_to_ids ) if str(_lowerCamelCase ) not in self.fairseq_tokens_to_ids: __A = len(self.fairseq_tokens_to_ids ) __A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ): '''simple docstring''' __A = self.__dict__.copy() __A = None __A = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = d # for backward compatibility if not hasattr(self, '''sp_model_kwargs''' ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A = [self.cls_token_id] __A = [self.sep_token_id] return cls + token_ids_a + 
sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : str ): '''simple docstring''' return self.sp_model.encode(_lowerCamelCase, out_type=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Optional[int] ): '''simple docstring''' return self.fairseq_ids_to_tokens[index] def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' __A = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase, ''' ''' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : str, 
_lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''], ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, _lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCamelCase, '''wb''' ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( _lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file, _lowerCamelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'{str(_lowerCamelCase )} \n' ) return out_vocab_file, out_monolingual_vocab_file
266
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = current_set.copy() for row_index, row in enumerate(__UpperCamelCase ): __A = row[0] for column_index, column in enumerate(__UpperCamelCase ): if magnitude == 0: __A = column continue __A = column / magnitude # Subtract to cancel term __A = current_set[0] __A = [first_row] __A = current_set[1::] for row in current_set: __A = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__UpperCamelCase ) continue for column_index in range(len(__UpperCamelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__UpperCamelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: __A = final_set[0] __A = [] __A = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) __A = simplify(__UpperCamelCase ) for i in range(len(__UpperCamelCase ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , __UpperCamelCase ) __A = resultant return final_set def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if len(__UpperCamelCase ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) __A = len(__UpperCamelCase ) + 1 if any(len(__UpperCamelCase ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(__UpperCamelCase , (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(__UpperCamelCase ) == 1: return [equations[0][-1] / equations[0][0]] __A = equations.copy() if any(0 in row for row in data_set ): __A = data_set.copy() __A = [] for row_index, row in enumerate(__UpperCamelCase ): if 0 not in row: __A = data_set.pop(__UpperCamelCase ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' 
) data_set.insert(0 , __UpperCamelCase ) __A = data_set.copy() __A = simplify(__UpperCamelCase ) __A = simplified[::-1] __A = [] for row in simplified: __A = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue __A = row.copy()[: len(__UpperCamelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__UpperCamelCase ) == 0: solutions.append(0 ) continue __A = temp_row[1::] __A = temp_row[::-1] for column_index, column in enumerate(__UpperCamelCase ): current_solution -= column * solutions[column_index] solutions.append(__UpperCamelCase ) __A = [] for item in solutions: final.append(float(round(__UpperCamelCase , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowercase_ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
266
1
"""simple docstring""" def lowerCAmelCase ( ): """simple docstring""" for n in range(1 , 1_0_0_0_0_0_0 ): yield n * (n + 1) // 2 def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = 1 __A = 2 while i * i <= n: __A = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def lowerCAmelCase ( ): """simple docstring""" return next(i for i in triangle_number_generator() if count_divisors(__UpperCamelCase ) > 5_0_0 ) if __name__ == "__main__": print(solution())
266
"""simple docstring""" from __future__ import annotations from typing import Any def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not postfix_notation: return 0 __A = {'''+''', '''-''', '''*''', '''/'''} __A = [] for token in postfix_notation: if token in operations: __A , __A = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(__UpperCamelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
266
1
"""simple docstring""" from __future__ import annotations import requests lowercase_ = set( 'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split() ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1 , __UpperCamelCase = "new" , __UpperCamelCase = None ): """simple docstring""" __A = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(__UpperCamelCase ) - valid_terms ) ): __A = f'Invalid search term: {invalid_search_terms}' raise ValueError(__UpperCamelCase ) __A = requests.get( f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={'''User-agent''': '''A random string'''} , ) if response.status_code == 4_2_9: raise requests.HTTPError __A = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(__UpperCamelCase )} __A = {} for id_ in range(__UpperCamelCase ): __A = { item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
266
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple, _lowerCamelCase : List[str]=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : int=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : int=99, _lowerCamelCase : Optional[int]=32, _lowerCamelCase : Tuple=5, _lowerCamelCase : Tuple=4, _lowerCamelCase : str=37, _lowerCamelCase : Union[str, Any]="gelu", _lowerCamelCase : int=0.1, _lowerCamelCase : List[Any]=0.1, _lowerCamelCase : Dict=5_12, _lowerCamelCase : List[Any]=16, _lowerCamelCase : Any=2, _lowerCamelCase : Any=0.02, _lowerCamelCase : Dict=4, ): '''simple docstring''' __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_attention_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_choices def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __A = None if self.use_attention_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A 
= None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) __A = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self.prepare_config_and_inputs() __A , __A , __A , __A = config_and_inputs __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Dict = True A_ : Tuple = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = FlaxRoFormerModelTester(self ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: __A = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''', from_pt=_lowerCamelCase ) __A = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCamelCase ) @require_flax class snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __A = jnp.array([[0, 1, 2, 3, 4, 5]] ) 
__A = model(_lowerCamelCase )[0] __A = 5_00_00 __A = (1, 6, vocab_size) self.assertEqual(output.shape, _lowerCamelCase ) __A = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
266
1
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType lowercase_ = logging.get_logger(__name__) lowercase_ = { 'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json', } class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : str = "layoutlmv3" def __init__( self : Optional[int], _lowerCamelCase : Union[str, Any]=5_02_65, _lowerCamelCase : Union[str, Any]=7_68, _lowerCamelCase : Union[str, Any]=12, _lowerCamelCase : Union[str, Any]=12, _lowerCamelCase : Dict=30_72, _lowerCamelCase : Any="gelu", _lowerCamelCase : Union[str, Any]=0.1, _lowerCamelCase : List[Any]=0.1, _lowerCamelCase : Any=5_12, _lowerCamelCase : Tuple=2, _lowerCamelCase : List[Any]=0.02, _lowerCamelCase : Optional[Any]=1e-5, _lowerCamelCase : str=1, _lowerCamelCase : Dict=0, _lowerCamelCase : Optional[Any]=2, _lowerCamelCase : Any=10_24, _lowerCamelCase : Union[str, Any]=1_28, _lowerCamelCase : Optional[int]=1_28, _lowerCamelCase : Tuple=True, _lowerCamelCase : Optional[int]=32, _lowerCamelCase : int=1_28, _lowerCamelCase : Optional[int]=64, _lowerCamelCase : Tuple=2_56, _lowerCamelCase : str=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Dict=True, _lowerCamelCase : str=2_24, _lowerCamelCase : int=3, _lowerCamelCase : Any=16, _lowerCamelCase : Union[str, Any]=None, **_lowerCamelCase : Dict, ): '''simple docstring''' super().__init__( vocab_size=_lowerCamelCase, hidden_size=_lowerCamelCase, num_hidden_layers=_lowerCamelCase, num_attention_heads=_lowerCamelCase, intermediate_size=_lowerCamelCase, hidden_act=_lowerCamelCase, hidden_dropout_prob=_lowerCamelCase, 
attention_probs_dropout_prob=_lowerCamelCase, max_position_embeddings=_lowerCamelCase, type_vocab_size=_lowerCamelCase, initializer_range=_lowerCamelCase, layer_norm_eps=_lowerCamelCase, pad_token_id=_lowerCamelCase, bos_token_id=_lowerCamelCase, eos_token_id=_lowerCamelCase, **_lowerCamelCase, ) __A = max_ad_position_embeddings __A = coordinate_size __A = shape_size __A = has_relative_attention_bias __A = rel_pos_bins __A = max_rel_pos __A = has_spatial_attention_bias __A = rel_ad_pos_bins __A = max_rel_ad_pos __A = text_embed __A = visual_embed __A = input_size __A = num_channels __A = patch_size __A = classifier_dropout class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : List[Any] = version.parse("1.12" ) @property def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ('''bbox''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) else: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''bbox''', {0: '''batch''', 1: '''sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' return 1e-5 @property def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return 12 def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : "ProcessorMixin", _lowerCamelCase : int = -1, _lowerCamelCase : int = -1, _lowerCamelCase : bool = False, _lowerCamelCase : Optional["TensorType"] = None, _lowerCamelCase : int = 3, _lowerCamelCase : int = 40, _lowerCamelCase : int = 40, ): '''simple 
docstring''' setattr(processor.image_processor, '''apply_ocr''', _lowerCamelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __A = compute_effective_axis_dimension( _lowerCamelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __A = processor.tokenizer.num_special_tokens_to_add(_lowerCamelCase ) __A = compute_effective_axis_dimension( _lowerCamelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=_lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence __A = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes __A = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) __A = self._generate_dummy_images(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) __A = dict( processor( _lowerCamelCase, text=_lowerCamelCase, boxes=_lowerCamelCase, return_tensors=_lowerCamelCase, ) ) return inputs
266
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def lowerCAmelCase ( __UpperCamelCase = 1_0_0_0_0_0_0 , __UpperCamelCase = 1_0 ): """simple docstring""" __A = defaultdict(__UpperCamelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __A = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __A = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__UpperCamelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 1_0 ) if __name__ == "__main__": print(F'''{solution() = }''')
266
1
"""simple docstring""" lowercase_ = [0, 2, 4, 6, 8] lowercase_ = [1, 3, 5, 7, 9] def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 1_0 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 __A = 0 for digit in range(1_0 ): __A = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 1_0 , __UpperCamelCase , __UpperCamelCase ) return result __A = 0 for digita in range(1_0 ): __A = digita if (remainder + digita) % 2 == 0: __A = ODD_DIGITS else: __A = EVEN_DIGITS for digita in other_parity_digits: __A = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 1_0 , __UpperCamelCase , __UpperCamelCase , ) return result def lowerCAmelCase ( __UpperCamelCase = 9 ): """simple docstring""" __A = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(__UpperCamelCase , 0 , [0] * length , __UpperCamelCase ) return result if __name__ == "__main__": print(F'''{solution() = }''')
266
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : Optional[int]=2, _lowerCamelCase : Optional[int]=3, _lowerCamelCase : int=64, _lowerCamelCase : List[str]=None ): '''simple docstring''' __A = np.random.default_rng(_lowerCamelCase ) __A = length __A = rng.normal(size=(length,) ).astype(np.floataa ) __A = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa ) def __len__( self : str ): '''simple docstring''' return self.length def __getitem__( self : Dict, _lowerCamelCase : Optional[int] ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple=0, _lowerCamelCase : Any=0, _lowerCamelCase : Optional[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[Any]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a[0] + self.b[0] class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : str, _lowerCamelCase : Optional[Any]=0, _lowerCamelCase : Any=0, _lowerCamelCase : List[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}' ) __A = False return x * self.a + self.b def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1_6 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __A = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __A = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} __A = load_dataset('''csv''' , data_files=__UpperCamelCase ) __A = datasets['''train'''].unique('''label''' ) __A = {v: i for i, v in enumerate(__UpperCamelCase )} def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) if "label" in examples: __A = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __A = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __A = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=2 ) __A = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
266
1
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def lowerCAmelCase ( __UpperCamelCase = 1_0_0_0_0_0_0 , __UpperCamelCase = 1_0 ): """simple docstring""" __A = defaultdict(__UpperCamelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __A = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __A = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__UpperCamelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 1_0 ) if __name__ == "__main__": print(F'''{solution() = }''')
266
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. 
According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... 
\'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ), } ), ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[List[List[str]]], _lowerCamelCase : List[List[str]], _lowerCamelCase : int = 1, _lowerCamelCase : int = 4, ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCamelCase, hypotheses=_lowerCamelCase, min_len=_lowerCamelCase, max_len=_lowerCamelCase ) }
266
1
"""simple docstring""" import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowercase_ = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN']) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = test_results.split(''' ''' ) __A = 0 __A = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. __A = expressions[-2] if '''=''' in expressions[-1] else expressions[-1] for i, expression in enumerate(__UpperCamelCase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = {} __A = None __A = False for line in failures_short_lines.split('''\n''' ): if re.search(r'''_ \[doctest\]''' , __UpperCamelCase ): __A = True __A = line.split(''' ''' )[2] elif in_error and not line.split(''' ''' )[0].isdigit(): __A = line __A = False return failures class snake_case : '''simple docstring''' def __init__( self : Union[str, Any], _lowerCamelCase : str, _lowerCamelCase : Dict ): '''simple docstring''' __A = title __A = doc_test_results['''time_spent'''].split(''',''' )[0] __A = doc_test_results['''success'''] __A = doc_test_results['''failures'''] __A = self.n_success + self.n_failures # Failures and success of the modeling tests __A = doc_test_results @property def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = [self._time_spent] __A = 0 for time in time_spent: __A = time.split(''':''' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. 
if len(_lowerCamelCase ) == 1: __A = [0, 0, time_parts[0]] __A , __A , __A = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 36_00 + minutes * 60 + seconds __A , __A , __A = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60 return f'{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s' @property def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' return { "type": "section", "text": { "type": "plain_text", "text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return { "type": "section", "text": { "type": "plain_text", "text": ( f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in' f' {self.time}.' 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = 40 __A = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase, _lowerCamelCase )} __A = '''''' for category, failures in category_failures.items(): if len(_lowerCamelCase ) == 0: continue if report != "": report += "\n\n" report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(_lowerCamelCase ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": f'The following examples had failures:\n\n\n{report}\n', }, } @property def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(_lowerCamelCase ) @staticmethod def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' __A = [ { '''type''': '''section''', '''text''': { '''type''': '''plain_text''', '''text''': '''There was an issue running the tests.''', }, '''accessory''': { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True}, '''url''': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } ] print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text='''There was an issue running the tests.''', blocks=_lowerCamelCase, ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' print('''Sending the 
following payload''' ) print(json.dumps({'''blocks''': json.loads(self.payload )} ) ) __A = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else '''All tests passed.''' __A = client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], blocks=self.payload, text=_lowerCamelCase, ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : str, _lowerCamelCase : Dict, _lowerCamelCase : Any, _lowerCamelCase : str ): '''simple docstring''' __A = '''''' for key, value in failures.items(): __A = value[:2_00] + ''' [Truncated]''' if len(_lowerCamelCase ) > 2_50 else value failures_text += f'*{key}*\n_{value}_\n\n' __A = job_name __A = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}} if job_link is not None: __A = { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True}, '''url''': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' if self.thread_ts is None: raise ValueError('''Can only post reply if a post has been made.''' ) __A = self.doc_test_results.pop('''job_link''' ) self.doc_test_results.pop('''failures''' ) self.doc_test_results.pop('''success''' ) self.doc_test_results.pop('''time_spent''' ) __A = sorted(self.doc_test_results.items(), key=lambda _lowerCamelCase : t[0] ) for job, job_result in sorted_dict: if len(job_result['''failures'''] ): __A = f'*Num failures* :{len(job_result["failed"] )} \n' __A = job_result['''failures'''] __A = self.get_reply_blocks(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, text=_lowerCamelCase ) print('''Sending the following reply''' ) print(json.dumps({'''blocks''': blocks} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], 
text=f'Results for {job}', blocks=_lowerCamelCase, thread_ts=self.thread_ts['''ts'''], ) time.sleep(1 ) def lowerCAmelCase ( ): """simple docstring""" __A = os.environ['''GITHUB_RUN_ID'''] __A = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100' __A = requests.get(__UpperCamelCase ).json() __A = {} try: jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) __A = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 ) for i in range(__UpperCamelCase ): __A = requests.get(url + f'&page={i + 2}' ).json() jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return jobs except Exception as e: print('''Unknown error, could not fetch links.''' , __UpperCamelCase ) return {} def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = {} if os.path.exists(__UpperCamelCase ): __A = os.listdir(__UpperCamelCase ) for file in files: try: with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , encoding='''utf-8''' ) as f: __A = f.read() except UnicodeDecodeError as e: raise ValueError(f'Could not open {os.path.join(__UpperCamelCase , __UpperCamelCase )}.' 
) from e return _artifact def lowerCAmelCase ( ): """simple docstring""" class snake_case : '''simple docstring''' def __init__( self : Any, _lowerCamelCase : str ): '''simple docstring''' __A = name __A = [] def __str__( self : List[str] ): '''simple docstring''' return self.name def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : str ): '''simple docstring''' self.paths.append({'''name''': self.name, '''path''': path} ) __A = {} __A = filter(os.path.isdir , os.listdir() ) for directory in directories: __A = directory if artifact_name not in _available_artifacts: __A = Artifact(__UpperCamelCase ) _available_artifacts[artifact_name].add_path(__UpperCamelCase ) return _available_artifacts if __name__ == "__main__": lowercase_ = get_job_links() lowercase_ = retrieve_available_artifacts() lowercase_ = collections.OrderedDict( [ ('*.py', 'API Examples'), ('*.md', 'MD Examples'), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowercase_ = { v: { 'failed': [], 'failures': {}, } for v in docs.values() } # Link to the GitHub Action job lowercase_ = github_actions_job_links.get('run_doctests') lowercase_ = available_artifacts['doc_tests_gpu_test_reports'].paths[0] lowercase_ = retrieve_artifact(artifact_path['name']) if "stats" in artifact: lowercase_ , lowercase_ , lowercase_ = handle_test_results(artifact['stats']) lowercase_ = failed lowercase_ = success lowercase_ = time_spent[1:-1] + ', ' lowercase_ = extract_first_line_failure(artifact['failures_short']) for line in artifact["summary_short"].split('\n'): if re.search('FAILED', line): lowercase_ = line.replace('FAILED ', '') lowercase_ = line.split()[0].replace('\n', '') if "::" in line: lowercase_ , lowercase_ = line.split('::') else: lowercase_ , lowercase_ = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): lowercase_ = docs[file_regex] 
doc_test_results[category]["failed"].append(test) lowercase_ = all_failures[test] if test in all_failures else 'N/A' lowercase_ = failure break lowercase_ = Message('🤗 Results of the doc tests.', doc_test_results) message.post() message.post_reply()
266
"""simple docstring""" class snake_case : '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : list[int] ): '''simple docstring''' __A = len(_lowerCamelCase ) __A = [0] * len_array if len_array > 0: __A = array[0] for i in range(1, _lowerCamelCase ): __A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int ): '''simple docstring''' __A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(_lowerCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
266
1
"""simple docstring""" from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar lowercase_ = TypeVar('T') class snake_case ( Generic[T] ): '''simple docstring''' def __init__( self : int, _lowerCamelCase : list[T], _lowerCamelCase : Callable[[T, T], T] ): '''simple docstring''' __A = None __A = len(_lowerCamelCase ) __A = [any_type for _ in range(self.N )] + arr __A = fnc self.build() def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' for p in range(self.N - 1, 0, -1 ): __A = self.fn(self.st[p * 2], self.st[p * 2 + 1] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : T ): '''simple docstring''' p += self.N __A = v while p > 1: __A = p // 2 __A = self.fn(self.st[p * 2], self.st[p * 2 + 1] ) def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : int, _lowerCamelCase : int ): # noqa: E741 '''simple docstring''' __A , __A = l + self.N, r + self.N __A = None while l <= r: if l % 2 == 1: __A = self.st[l] if res is None else self.fn(_lowerCamelCase, self.st[l] ) if r % 2 == 0: __A = self.st[r] if res is None else self.fn(_lowerCamelCase, self.st[r] ) __A , __A = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce lowercase_ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] lowercase_ = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } lowercase_ = SegmentTree(test_array, min) lowercase_ = SegmentTree(test_array, max) lowercase_ = SegmentTree(test_array, lambda a, b: a + b) def lowerCAmelCase ( ): """simple docstring""" for i in range(len(__UpperCamelCase ) ): for j in range(__UpperCamelCase , len(__UpperCamelCase ) ): __A = reduce(__UpperCamelCase , test_array[i : j + 1] ) __A = reduce(__UpperCamelCase , test_array[i : j + 1] ) __A = reduce(lambda __UpperCamelCase , __UpperCamelCase : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(__UpperCamelCase , 
__UpperCamelCase ) assert max_range == max_segment_tree.query(__UpperCamelCase , __UpperCamelCase ) assert sum_range == sum_segment_tree.query(__UpperCamelCase , __UpperCamelCase ) test_all_segments() for index, value in test_updates.items(): lowercase_ = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
266
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowercase_ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowercase_ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase ( ): """simple docstring""" __A = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __A = bs[:] __A = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCamelCase ) cs.append(2**8 + n ) n += 1 __A = [chr(__UpperCamelCase ) for n in cs] return dict(zip(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = set() __A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __A = char return pairs class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = VOCAB_FILES_NAMES A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : Dict="replace", _lowerCamelCase : 
Any="<s>", _lowerCamelCase : Optional[int]="</s>", _lowerCamelCase : Dict="</s>", _lowerCamelCase : List[Any]="<s>", _lowerCamelCase : List[str]="<unk>", _lowerCamelCase : str="<pad>", _lowerCamelCase : Any="<mask>", _lowerCamelCase : Any=False, **_lowerCamelCase : Tuple, ): '''simple docstring''' __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else bos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else eos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else sep_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else cls_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token super().__init__( errors=_lowerCamelCase, bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, add_prefix_space=_lowerCamelCase, **_lowerCamelCase, ) with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) __A = {v: k for k, v in self.encoder.items()} __A = errors # how to handle errors in decoding __A = bytes_to_unicode() __A = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCamelCase, encoding='''utf-8''' ) as merges_handle: __A = merges_handle.read().split('''\n''' )[1:-1] __A = [tuple(merge.split() ) for merge in bpe_merges] __A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) ) __A = {} __A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder, **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] __A = tuple(_lowerCamelCase ) __A = get_pairs(_lowerCamelCase ) if not pairs: return token while True: __A = min(_lowerCamelCase, key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __A , 
__A = bigram __A = [] __A = 0 while i < len(_lowerCamelCase ): try: __A = word.index(_lowerCamelCase, _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __A = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __A = tuple(_lowerCamelCase ) __A = new_word if len(_lowerCamelCase ) == 1: break else: __A = get_pairs(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = word return word def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Dict ): '''simple docstring''' __A = [] for token in re.findall(self.pat, _lowerCamelCase ): __A = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) ) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' return self.encoder.get(_lowerCamelCase, self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ): '''simple docstring''' return self.decoder.get(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = ''''''.join(_lowerCamelCase ) __A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors ) return text def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join( _lowerCamelCase, 
(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowerCamelCase, ensure_ascii=_lowerCamelCase ) + '''\n''' ) __A = 0 with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ''' Please check that the tokenizer is not corrupted!''' ) __A = token_index writer.write(''' '''.join(_lowerCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str]=False, **_lowerCamelCase : List[Any] ): '''simple docstring''' __A = kwargs.pop('''add_prefix_space''', self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()): __A = ''' ''' + text 
return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ): '''simple docstring''' __A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __A = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
266
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase = 1_0_0_0 ): """simple docstring""" return sum(e for e in range(3 , __UpperCamelCase ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F'''{solution() = }''')
266
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging lowercase_ = ( 'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py' ) lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCAmelCase ( ): """simple docstring""" __A = '''https://pypi.org/pypi/diffusers/json''' __A = json.loads(request.urlopen(__UpperCamelCase ).read() )['''releases'''].keys() return sorted(__UpperCamelCase , key=lambda __UpperCamelCase : version.Version(__UpperCamelCase ) ) def lowerCAmelCase ( ): """simple docstring""" if HF_MODULES_CACHE in sys.path: return sys.path.append(__UpperCamelCase ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = Path(__UpperCamelCase ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" init_hf_modules() __A = Path(__UpperCamelCase ) / name # If the parent module does not exist yet, recursively create it. 
if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import .xxx` __A = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = False __A = [module_file] __A = [] # Let's recurse through all relative imports while not no_change: __A = [] for f in files_to_check: new_imports.extend(get_relative_imports(__UpperCamelCase ) ) __A = Path(__UpperCamelCase ).parent __A = [str(module_path / m ) for m in new_imports] __A = [f for f in new_import_files if f not in all_relative_imports] __A = [f'{f}.py' for f in new_import_files] __A = len(__UpperCamelCase ) == 0 all_relative_imports.extend(__UpperCamelCase ) return all_relative_imports def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import xxx` __A = re.findall('''^\s*import\s+(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Only keep the top-level module __A = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all __A = list(set(__UpperCamelCase ) ) __A = [] for imp in imports: try: importlib.import_module(__UpperCamelCase ) except ImportError: 
missing_packages.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' f'{", ".join(__UpperCamelCase )}. Run `pip install {" ".join(__UpperCamelCase )}`' ) return get_relative_imports(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = module_path.replace(os.path.sep , '''.''' ) __A = importlib.import_module(__UpperCamelCase ) if class_name is None: return find_pipeline_class(__UpperCamelCase ) return getattr(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" from ..pipelines import DiffusionPipeline __A = dict(inspect.getmembers(__UpperCamelCase , inspect.isclass ) ) __A = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , __UpperCamelCase ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' f' {loaded_module}.' 
) __A = cls return pipeline_class def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ): """simple docstring""" __A = str(__UpperCamelCase ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): __A = module_file_or_url __A = '''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: __A = get_diffusers_versions() # cut ".dev0" __A = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: __A = latest_version if latest_version[1:] in available_versions else '''main''' logger.info(f'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: __A = f'v{revision}' elif revision == "main": __A = revision else: raise ValueError( f'`custom_revision`: {revision} does not exist. Please make sure to choose one of' f' {", ".join(available_versions + ["main"] )}.' ) # community pipeline on GitHub __A = COMMUNITY_PIPELINES_URL.format(revision=__UpperCamelCase , pipeline=__UpperCamelCase ) try: __A = cached_download( __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = '''git''' __A = pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' 
) raise else: try: # Load from URL or cache if already cached __A = hf_hub_download( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment __A = check_imports(__UpperCamelCase ) # Now we move the module inside our cached dynamic modules. __A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(__UpperCamelCase ) __A = Path(__UpperCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(__UpperCamelCase , submodule_path / module_file ) for module_needed in modules_needed: __A = f'{module_needed}.py' shutil.copy(os.path.join(__UpperCamelCase , __UpperCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(__UpperCamelCase , __UpperCamelCase ): __A = use_auth_token elif use_auth_token is True: __A = HfFolder.get_token() else: __A = None __A = model_info(__UpperCamelCase , revision=__UpperCamelCase , token=__UpperCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
__A = submodule_path / commit_hash __A = full_submodule + os.path.sep + commit_hash create_dynamic_module(__UpperCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(__UpperCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( __UpperCamelCase , f'{module_needed}.py' , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return os.path.join(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , **__UpperCamelCase , ): """simple docstring""" __A = get_cached_module_file( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return get_class_in_module(__UpperCamelCase , final_module.replace('''.py''' , '''''' ) )
266
1
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('>=', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType lowercase_ = get_logger(__name__) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0 ): """simple docstring""" os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) with FSDP.state_dict_type( __UpperCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __A = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __A = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin' __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) if accelerator.process_index == 0: logger.info(f'Saving model to {output_model_file}' ) torch.save(__UpperCamelCase , __UpperCamelCase ) logger.info(f'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __A = ( f'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) logger.info(f'Saving model to {output_model_file}' ) torch.save(__UpperCamelCase , __UpperCamelCase ) logger.info(f'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __A = os.path.join(__UpperCamelCase , 
f'{MODEL_NAME}_{model_index}' ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) logger.info(f'Saving model to {ckpt_dir}' ) __A = {'''model''': state_dict} dist_cp.save_state_dict( state_dict=__UpperCamelCase , storage_writer=dist_cp.FileSystemWriter(__UpperCamelCase ) , planner=DefaultSavePlanner() , ) logger.info(f'Model saved to {ckpt_dir}' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0 ): """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( __UpperCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__UpperCamelCase ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when ''' '''initializing FSDP object''' ) return __A = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin' __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) logger.info(f'Loading model from {input_model_file}' ) __A = torch.load(__UpperCamelCase ) logger.info(f'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __A = ( f'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) logger.info(f'Loading model from {input_model_file}' ) __A = torch.load(__UpperCamelCase ) logger.info(f'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __A = ( os.path.join(__UpperCamelCase , f'{MODEL_NAME}_{model_index}' ) if f'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(f'Loading model from {ckpt_dir}' ) __A = {'''model''': 
model.state_dict()} dist_cp.load_state_dict( state_dict=__UpperCamelCase , storage_reader=dist_cp.FileSystemReader(__UpperCamelCase ) , planner=DefaultLoadPlanner() , ) __A = state_dict['''model'''] logger.info(f'Model loaded from {ckpt_dir}' ) model.load_state_dict(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0 ): """simple docstring""" os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) with FSDP.state_dict_type( __UpperCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __A = FSDP.optim_state_dict(__UpperCamelCase , __UpperCamelCase ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __A = ( f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) logger.info(f'Saving Optimizer state to {output_optimizer_file}' ) torch.save(__UpperCamelCase , __UpperCamelCase ) logger.info(f'Optimizer state saved in {output_optimizer_file}' ) else: __A = os.path.join(__UpperCamelCase , f'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) logger.info(f'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(__UpperCamelCase ) , planner=DefaultSavePlanner() , ) logger.info(f'Optimizer state saved in {ckpt_dir}' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0 ): """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( __UpperCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __A = None # below 
check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __A = ( f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) logger.info(f'Loading Optimizer state from {input_optimizer_file}' ) __A = torch.load(__UpperCamelCase ) logger.info(f'Optimizer state loaded from {input_optimizer_file}' ) else: __A = ( os.path.join(__UpperCamelCase , f'{OPTIMIZER_NAME}_{optimizer_index}' ) if f'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(f'Loading Optimizer from {ckpt_dir}' ) __A = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(__UpperCamelCase ) , ) __A = optim_state['''optimizer'''] logger.info(f'Optimizer loaded from {ckpt_dir}' ) __A = FSDP.optim_state_dict_to_load(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) optimizer.load_state_dict(__UpperCamelCase )
266
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ): __A = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sgugger/tiny-distilbert-classification''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], 
multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) # set architectures equal to `None` __A = None __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple 
docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) 
self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, save_to_csv=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ), inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ), env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ), multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ): 
self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ), log_print=_lowerCamelCase, trace_memory_line_by_line=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
266
1
"""simple docstring""" from collections import defaultdict def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = 1 __A = True for v in tree[start]: if v not in visited: ret += dfs(__UpperCamelCase ) if ret % 2 == 0: cuts.append(__UpperCamelCase ) return ret def lowerCAmelCase ( ): """simple docstring""" dfs(1 ) if __name__ == "__main__": lowercase_ , lowercase_ = 10, 9 lowercase_ = defaultdict(list) lowercase_ = {} lowercase_ = [] lowercase_ = 0 lowercase_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
266
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = PegasusTokenizer A_ : int = PegasusTokenizerFast A_ : Optional[Any] = True A_ : Union[str, Any] = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def _SCREAMING_SNAKE_CASE ( self : int, **_lowerCamelCase : List[Any] ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = '''</s>''' __A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ), _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<pad>''' ) self.assertEqual(vocab_keys[1], '''</s>''' ) self.assertEqual(vocab_keys[-1], '''v''' ) self.assertEqual(len(_lowerCamelCase ), 11_03 ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' 
self.assertEqual(self.get_tokenizer().vocab_size, 11_03 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word __A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' __A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_61_03 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_03 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 10_24 __A = '''To ensure a smooth flow of bank resolutions.''' __A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def 
_SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 1_50, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 10_24) assert batch.attention_mask.shape == (2, 10_24) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # fmt: off __A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase, model_name='''google/bigbird-pegasus-large-arxiv''', revision='''ba85d0851d708441f91440d509690f1ab6353415''', ) @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = PegasusTokenizer A_ : Union[str, Any] = PegasusTokenizerFast A_ : Any = True A_ : str = True def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase, offset=0, mask_token_sent=_lowerCamelCase, mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], **_lowerCamelCase : Dict ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ): '''simple docstring''' return ("This 
is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 10_00, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 40_96) assert batch.attention_mask.shape == (2, 40_96) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) __A = self._large_tokenizer(_lowerCamelCase ).input_ids self.assertListEqual( _lowerCamelCase, [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1], )
266
1
"""DeiT model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): original file bound both the logger and this map to the same
# mangled name, clobbering the logger; restored to distinct names.
DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    """Configuration class for a DeiT vision-transformer model.

    Stores the hyper-parameters used to instantiate the model; defaults match
    the values hard-coded in the original `__init__` signature.

    Args:
        hidden_size: Dimensionality of the encoder layers.
        num_hidden_layers: Number of transformer encoder layers.
        num_attention_heads: Attention heads per layer.
        intermediate_size: Dimensionality of the feed-forward layer.
        hidden_act: Activation function name (default "gelu").
        hidden_dropout_prob: Dropout probability for fully connected layers.
        attention_probs_dropout_prob: Dropout probability for attention weights.
        initializer_range: Stddev of the truncated-normal weight initializer.
        layer_norm_eps: Epsilon used by layer-norm layers.
        image_size: Input image resolution (square).
        patch_size: Patch resolution.
        num_channels: Number of input image channels.
        qkv_bias: Whether query/key/value projections carry a bias.
        encoder_stride: Stride used by the decoder head for masked image modeling.
    """

    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    """ONNX export configuration for DeiT.

    NOTE(review): in the mangled original both classes shared one name and
    both properties shared one name, so the config class and the `inputs`
    property were unreachable; names restored to the evident intent.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel-values input with symbolic batch/channel/height/width axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4
266
"""String case-conversion helpers (PascalCase, camelCase, snake_case, kebab-case)."""
import re


def split_input(str_):
    """Split *str_* on punctuation, then split each fragment into words.

    Returns a list of word lists, e.g. ``"one two,three"`` ->
    ``[['one', 'two'], ['three']]``.
    """
    # Any character outside [space, a-z, A-Z, 0-9, whitespace] is a separator.
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_):
    """Concatenate every word of *str_* with its first letter capitalised."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text, upper, separator):
    """Join the words of *text* with *separator*, upper- or lower-casing them.

    Returns the literal string "not valid string" when indexing fails.
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text):
    """Convert *text* to PascalCase."""
    return to_simple_case(text)


def to_camel_case(text):
    """Convert *text* to camelCase; returns "not valid string" for empty input."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text, upper):
    """Convert *text* to snake_case (upper-cased words when *upper* is truthy)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text, upper):
    """Convert *text* to kebab-case (upper-cased words when *upper* is truthy)."""
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
266
1
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=True , __UpperCamelCase="pt" ): """simple docstring""" __A = {'''add_prefix_space''': True} if isinstance(__UpperCamelCase , __UpperCamelCase ) and not line.startswith(''' ''' ) else {} __A = padding_side return tokenizer( [line] , max_length=__UpperCamelCase , padding='''max_length''' if pad_to_max_length else None , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase , ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , ): """simple docstring""" __A = input_ids.ne(__UpperCamelCase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Tuple, _lowerCamelCase : List[str], _lowerCamelCase : int, _lowerCamelCase : Tuple, _lowerCamelCase : Tuple, _lowerCamelCase : str="train", _lowerCamelCase : Dict=None, _lowerCamelCase : int=None, _lowerCamelCase : Any=None, _lowerCamelCase : Union[str, Any]="", ): '''simple docstring''' super().__init__() __A = Path(_lowerCamelCase ).joinpath(type_path + '''.source''' ) __A = Path(_lowerCamelCase ).joinpath(type_path + '''.target''' ) __A = self.get_char_lens(self.src_file ) __A = max_source_length __A = max_target_length assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}' __A = tokenizer __A = prefix if n_obs is not None: __A = 
self.src_lens[:n_obs] __A = src_lang __A = tgt_lang def __len__( self : Optional[Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : List[str], _lowerCamelCase : Dict ): '''simple docstring''' __A = index + 1 # linecache starts at 1 __A = self.prefix + linecache.getline(str(self.src_file ), _lowerCamelCase ).rstrip('''\n''' ) __A = linecache.getline(str(self.tgt_file ), _lowerCamelCase ).rstrip('''\n''' ) assert source_line, f'empty source line for index {index}' assert tgt_line, f'empty tgt line for index {index}' # Need to add eos token manually for T5 if isinstance(self.tokenizer, _lowerCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __A = ( self.tokenizer.question_encoder if isinstance(self.tokenizer, _lowerCamelCase ) else self.tokenizer ) __A = self.tokenizer.generator if isinstance(self.tokenizer, _lowerCamelCase ) else self.tokenizer __A = encode_line(_lowerCamelCase, _lowerCamelCase, self.max_source_length, '''right''' ) __A = encode_line(_lowerCamelCase, _lowerCamelCase, self.max_target_length, '''right''' ) __A = source_inputs['''input_ids'''].squeeze() __A = target_inputs['''input_ids'''].squeeze() __A = source_inputs['''attention_mask'''].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ): '''simple docstring''' return [len(_lowerCamelCase ) for x in Path(_lowerCamelCase ).open().readlines()] def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : int ): '''simple docstring''' __A = torch.stack([x['''input_ids'''] for x in batch] ) __A = torch.stack([x['''attention_mask'''] for x in batch] ) __A = torch.stack([x['''decoder_input_ids'''] for x in batch] ) __A = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer, _lowerCamelCase ) else self.tokenizer.pad_token_id ) __A = ( 
self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer, _lowerCamelCase ) else self.tokenizer.pad_token_id ) __A = trim_batch(_lowerCamelCase, _lowerCamelCase ) __A , __A = trim_batch(_lowerCamelCase, _lowerCamelCase, attention_mask=_lowerCamelCase ) __A = { '''input_ids''': source_ids, '''attention_mask''': source_mask, '''decoder_input_ids''': y, } return batch lowercase_ = getLogger(__name__) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return list(itertools.chain.from_iterable(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = get_git_info() save_json(__UpperCamelCase , os.path.join(__UpperCamelCase , '''git_log.json''' ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=4 , **__UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''w''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase ) as f: return json.load(__UpperCamelCase ) def lowerCAmelCase ( ): """simple docstring""" __A = git.Repo(search_parent_directories=__UpperCamelCase ) __A = { '''repo_id''': str(__UpperCamelCase ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), '''hostname''': str(socket.gethostname() ), } return repo_infos def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return list(map(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''wb''' ) as f: return pickle.dump(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" def remove_articles(__UpperCamelCase ): return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , __UpperCamelCase ) def white_space_fix(__UpperCamelCase ): return " ".join(text.split() ) def 
remove_punc(__UpperCamelCase ): __A = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__UpperCamelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = normalize_answer(__UpperCamelCase ).split() __A = normalize_answer(__UpperCamelCase ).split() __A = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase ) __A = sum(common.values() ) if num_same == 0: return 0 __A = 1.0 * num_same / len(__UpperCamelCase ) __A = 1.0 * num_same / len(__UpperCamelCase ) __A = (2 * precision * recall) / (precision + recall) return fa def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" assert len(__UpperCamelCase ) == len(__UpperCamelCase ) __A = 0 for hypo, pred in zip(__UpperCamelCase , __UpperCamelCase ): em += exact_match_score(__UpperCamelCase , __UpperCamelCase ) if len(__UpperCamelCase ) > 0: em /= len(__UpperCamelCase ) return {"em": em} def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return model_prefix.startswith('''rag''' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __A = '''dropout_rate''' for p in extra_params: if getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if not hasattr(__UpperCamelCase , __UpperCamelCase ) and not hasattr(__UpperCamelCase , equivalent_param[p] ): logger.info('''config doesn\'t have a `{}` attribute'''.format(__UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) continue __A = p if hasattr(__UpperCamelCase , __UpperCamelCase ) else equivalent_param[p] 
setattr(__UpperCamelCase , __UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) return hparams, config
266
"""Singly linked list with forward repr and recursive reverse printing."""
from __future__ import annotations


class Node:
    """One node of a singly linked list: a payload plus a ``next`` pointer."""

    def __init__(self, data=None):
        self.data = data
        self.next = None  # tail node keeps next = None

    def __repr__(self):
        """Return the list from this node onwards as "a->b->c"."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        # Original joined an undefined name here; join the collected pieces.
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Build a linked list from *elements_list* and return its head node.

    Raises:
        Exception: if *elements_list* is empty (no head can be made).
    """
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Print the list's elements in reverse order via recursion."""
    # Recurse to the tail first, then print on the way back up.
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    """Demo entry point: run doctests, then show a list forwards and backwards."""
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
266
1
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" print('''Loading config file...''' ) def flatten_yaml_as_dict(__UpperCamelCase , __UpperCamelCase="" , __UpperCamelCase="." ): __A = [] for k, v in d.items(): __A = parent_key + sep + k if parent_key else k if isinstance(__UpperCamelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__UpperCamelCase , __UpperCamelCase , sep=__UpperCamelCase ).items() ) else: items.append((new_key, v) ) return dict(__UpperCamelCase ) __A = argparse.Namespace() with open(__UpperCamelCase , '''r''' ) as yaml_file: try: __A = yaml.load(__UpperCamelCase , Loader=yaml.FullLoader ) __A = flatten_yaml_as_dict(__UpperCamelCase ) for k, v in flat_cfg.items(): setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) except yaml.YAMLError as exc: logger.error('''Error while loading config file: {}. 
Error message: {}'''.format(__UpperCamelCase , str(__UpperCamelCase ) ) ) return config def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = MobileViTVaConfig() __A = False # dataset if task_name.startswith('''imagenet1k_''' ): __A = 1_0_0_0 if int(task_name.strip().split('''_''' )[-1] ) == 3_8_4: __A = 3_8_4 else: __A = 2_5_6 __A = '''imagenet-1k-id2label.json''' elif task_name.startswith('''imagenet21k_to_1k_''' ): __A = 2_1_0_0_0 if int(task_name.strip().split('''_''' )[-1] ) == 3_8_4: __A = 3_8_4 else: __A = 2_5_6 __A = '''imagenet-22k-id2label.json''' elif task_name.startswith('''ade20k_''' ): __A = 1_5_1 __A = 5_1_2 __A = '''ade20k-id2label.json''' __A = True elif task_name.startswith('''voc_''' ): __A = 2_1 __A = 5_1_2 __A = '''pascal-voc-id2label.json''' __A = True # orig_config __A = load_orig_config_file(__UpperCamelCase ) assert getattr(__UpperCamelCase , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model" __A = getattr(__UpperCamelCase , '''model.classification.mitv2.width_multiplier''' , 1.0 ) assert ( getattr(__UpperCamelCase , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" __A = getattr(__UpperCamelCase , '''model.classification.activation.name''' , '''swish''' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: __A = getattr(__UpperCamelCase , '''model.segmentation.output_stride''' , 1_6 ) if "_deeplabv3" in task_name: __A = getattr(__UpperCamelCase , '''model.segmentation.deeplabv3.aspp_rates''' , [1_2, 2_4, 3_6] ) __A = getattr(__UpperCamelCase , '''model.segmentation.deeplabv3.aspp_out_channels''' , 5_1_2 ) __A = getattr(__UpperCamelCase , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 ) # id2label __A = '''huggingface/label-files''' __A = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' 
) ) __A = {int(__UpperCamelCase ): v for k, v in idalabel.items()} __A = idalabel __A = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = dct.pop(__UpperCamelCase ) __A = val def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ): """simple docstring""" if base_model: __A = '''''' else: __A = '''mobilevitv2.''' __A = [] for k in state_dict.keys(): if k[:8] == "encoder.": __A = k[8:] else: __A = k if ".block." in k: __A = k_new.replace('''.block.''' , '''.''' ) if ".conv." in k: __A = k_new.replace('''.conv.''' , '''.convolution.''' ) if ".norm." in k: __A = k_new.replace('''.norm.''' , '''.normalization.''' ) if "conv_1." in k: __A = k_new.replace('''conv_1.''' , f'{model_prefix}conv_stem.' ) for i in [1, 2]: if f'layer_{i}.' in k: __A = k_new.replace(f'layer_{i}.' , f'{model_prefix}encoder.layer.{i-1}.layer.' ) if ".exp_1x1." in k: __A = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' ) if ".red_1x1." in k: __A = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' ) for i in [3, 4, 5]: if f'layer_{i}.0.' in k: __A = k_new.replace(f'layer_{i}.0.' , f'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' ) if f'layer_{i}.1.local_rep.0.' in k: __A = k_new.replace(f'layer_{i}.1.local_rep.0.' , f'{model_prefix}encoder.layer.{i-1}.conv_kxk.' ) if f'layer_{i}.1.local_rep.1.' in k: __A = k_new.replace(f'layer_{i}.1.local_rep.1.' , f'{model_prefix}encoder.layer.{i-1}.conv_1x1.' ) for i in [3, 4, 5]: if i == 3: __A = [0, 1] elif i == 4: __A = [0, 1, 2, 3] elif i == 5: __A = [0, 1, 2] for j in j_in: if f'layer_{i}.1.global_rep.{j}.' in k: __A = k_new.replace( f'layer_{i}.1.global_rep.{j}.' , f'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' ) if f'layer_{i}.1.global_rep.{j+1}.' in k: __A = k_new.replace( f'layer_{i}.1.global_rep.{j+1}.' , f'{model_prefix}encoder.layer.{i-1}.layernorm.' ) if f'layer_{i}.1.conv_proj.' 
in k: __A = k_new.replace(f'layer_{i}.1.conv_proj.' , f'{model_prefix}encoder.layer.{i-1}.conv_projection.' ) if "pre_norm_attn.0." in k: __A = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' ) if "pre_norm_attn.1." in k: __A = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' ) if "pre_norm_ffn.0." in k: __A = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' ) if "pre_norm_ffn.1." in k: __A = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' ) if "pre_norm_ffn.3." in k: __A = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' ) if "classifier.1." in k: __A = k_new.replace('''classifier.1.''' , '''classifier.''' ) if "seg_head." in k: __A = k_new.replace('''seg_head.''' , '''segmentation_head.''' ) if ".aspp_layer." in k: __A = k_new.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in k: __A = k_new.replace('''.aspp_pool.''' , '''.''' ) rename_keys.append((k, k_new) ) return rename_keys def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = [] for k in state_dict.keys(): if k.startswith('''seg_head.aux_head.''' ): keys_to_ignore.append(__UpperCamelCase ) for k in keys_to_ignore: state_dict.pop(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( ): """simple docstring""" __A = '''http://images.cocodataset.org/val2017/000000039769.jpg''' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" __A = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = get_mobilevitva_config(__UpperCamelCase , __UpperCamelCase ) # load original state_dict __A = torch.load(__UpperCamelCase , map_location='''cpu''' ) # load huggingface model if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ): __A = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval() __A = False 
else: __A = MobileViTVaForImageClassification(__UpperCamelCase ).eval() __A = False # remove and rename some keys of load the original model __A = checkpoint remove_unused_keys(__UpperCamelCase ) __A = create_rename_keys(__UpperCamelCase , base_model=__UpperCamelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # load modified state_dict model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor __A = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 ) __A = image_processor(images=prepare_img() , return_tensors='''pt''' ) __A = model(**__UpperCamelCase ) # verify classification model if task_name.startswith('''imagenet''' ): __A = outputs.logits __A = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0: # expected_logits for base variant __A = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ) assert torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1e-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'Saving model {task_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(__UpperCamelCase ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='imagenet1k_256', type=str, help=( 'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
' '\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n ' ), choices=[ 'imagenet1k_256', 'imagenet1k_384', 'imagenet21k_to_1k_256', 'imagenet21k_to_1k_384', 'ade20k_deeplabv3', 'voc_deeplabv3', ], ) parser.add_argument( '--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.') parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) lowercase_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
266
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : int = ["input_features", "attention_mask"] def __init__( self : Optional[Any], _lowerCamelCase : Union[str, Any]=80, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Any=80, _lowerCamelCase : List[str]=0.0, _lowerCamelCase : int=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]=True, **_lowerCamelCase : List[str], ): '''simple docstring''' super().__init__(feature_size=_lowerCamelCase, sampling_rate=_lowerCamelCase, padding_value=_lowerCamelCase, **_lowerCamelCase ) __A = num_mel_bins __A = do_ceptral_normalize __A = normalize_means __A = normalize_vars __A = True def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : np.ndarray, ): '''simple docstring''' __A = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __A = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 ) __A = ta_kaldi.fbank(_lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray, _lowerCamelCase : int, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : float = 0.0, ): '''simple docstring''' # make sure we normalize float32 arrays if normalize_means: __A = x[:input_length].mean(axis=0 ) __A = np.subtract(_lowerCamelCase, _lowerCamelCase ) if normalize_vars: __A = x[:input_length].std(axis=0 ) __A = np.divide(_lowerCamelCase, _lowerCamelCase ) if input_length < x.shape[0]: __A = padding_value # make sure array is in float32 __A = x.astype(np.floataa ) return x def 
_SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[np.ndarray], _lowerCamelCase : Optional[np.ndarray] = None ): '''simple docstring''' __A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_lowerCamelCase, _lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value ) for x, n in zip(_lowerCamelCase, _lowerCamelCase ) ] def __call__( self : Optional[Any], _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _lowerCamelCase : Union[bool, str, PaddingStrategy] = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : bool = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[bool] = None, **_lowerCamelCase : Optional[Any], ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __A = isinstance(_lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __A = is_batched_numpy or ( isinstance(_lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCamelCase, np.ndarray ): __A = np.asarray(_lowerCamelCase, dtype=np.floataa ) elif isinstance(_lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __A = [raw_speech] # extract fbank features __A = [self._extract_fbank_features(_lowerCamelCase ) for waveform in raw_speech] # convert into correct format for padding __A = BatchFeature({'''input_features''': features} ) __A = self.pad( _lowerCamelCase, padding=_lowerCamelCase, max_length=_lowerCamelCase, truncation=_lowerCamelCase, pad_to_multiple_of=_lowerCamelCase, return_attention_mask=_lowerCamelCase, **_lowerCamelCase, ) # make sure list is in array format __A = padded_inputs.get('''input_features''' ) if isinstance(input_features[0], _lowerCamelCase ): __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for feature in input_features] __A = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: __A = [np.asarray(_lowerCamelCase, dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __A = ( np.array(_lowerCamelCase, dtype=np.intaa ) if self._get_padding_strategies(_lowerCamelCase, max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __A = self.normalize( padded_inputs['''input_features'''], attention_mask=_lowerCamelCase ) if 
return_tensors is not None: __A = padded_inputs.convert_to_tensors(_lowerCamelCase ) return padded_inputs
266
1
"""simple docstring""" from __future__ import annotations from typing import Generic, TypeVar lowercase_ = TypeVar('T') class snake_case ( Generic[T] ): '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : T ): '''simple docstring''' __A = data __A = self __A = 0 class snake_case ( Generic[T] ): '''simple docstring''' def __init__( self : Dict ): '''simple docstring''' # map from node name to the node object __A = {} def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : T ): '''simple docstring''' # create a new set with x as its member __A = DisjointSetTreeNode(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : T ): '''simple docstring''' # find the set x belongs to (with path-compression) __A = self.map[data] if elem_ref != elem_ref.parent: __A = self.find_set(elem_ref.parent.data ) return elem_ref.parent def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : DisjointSetTreeNode[T], _lowerCamelCase : DisjointSetTreeNode[T] ): '''simple docstring''' # helper function for union operation if nodea.rank > nodea.rank: __A = nodea else: __A = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : T, _lowerCamelCase : T ): '''simple docstring''' # merge 2 disjoint sets self.link(self.find_set(_lowerCamelCase ), self.find_set(_lowerCamelCase ) ) class snake_case ( Generic[T] ): '''simple docstring''' def __init__( self : Dict ): '''simple docstring''' # connections: map from the node to the neighbouring nodes (with weights) __A = {} def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : T ): '''simple docstring''' # add a node ONLY if its not present in the graph if node not in self.connections: __A = {} def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : T, _lowerCamelCase : T, _lowerCamelCase : int ): '''simple docstring''' # add an edge with the given weight self.add_node(_lowerCamelCase ) self.add_node(_lowerCamelCase ) __A = 
weight __A = weight def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = [] __A = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda _lowerCamelCase : x[2] ) # creating the disjoint set __A = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(_lowerCamelCase ) # MST generation __A = 0 __A = 0 __A = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: __A , __A , __A = edges[index] index += 1 __A = disjoint_set.find_set(_lowerCamelCase ) __A = disjoint_set.find_set(_lowerCamelCase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) disjoint_set.union(_lowerCamelCase, _lowerCamelCase ) return graph
266
"""Tests for the EfficientFormer image processor (which reuses ViTImageProcessor)."""

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Holds the fixture parameters and builds the image-processor config dict."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Avoid mutable default arguments; fall back to the historical defaults.
        size = size if size is not None else {"height": 18, "width": 18}
        image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        # Covered by the common mixin tests; nothing processor-specific to add.
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
266
1
"""SQuAD v1 metric (exact match and F1), wrapping the official scoring script."""

import datasets

from .evaluate import evaluate


_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'

_DESCRIPTION = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'

_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    """SQuAD v1 metric. `datasets.Metric` requires the `_info`/`_compute` overrides below."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        """Re-shape predictions/references into the official script's format and score them."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
266
"""Deprecated alias for SegformerImageProcessor kept for backward compatibility."""

import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    """Thin deprecated subclass: warns, then defers everything to SegformerImageProcessor."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit a FutureWarning so downstream code migrates before v5 removes this class.
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
266
1
"""Tests for FeaturesManager.determine_framework (explicit arg, local checkpoint, environment)."""

from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        """Save a PyTorch checkpoint of the tiny test model into save_dir."""
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        """Save a TensorFlow checkpoint of the tiny test model into save_dir."""
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            # NOTE(review): exception type inferred from FeaturesManager's behavior on an
            # empty directory — confirm against transformers.onnx.features.
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            # NOTE(review): EnvironmentError inferred — confirm against FeaturesManager.
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
266
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : int=3, _lowerCamelCase : Optional[Any]=18, _lowerCamelCase : Any=30, _lowerCamelCase : str=4_00, _lowerCamelCase : int=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=True, ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = apply_ocr def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = LayoutLMvaImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''apply_ocr''' ) ) def 
_SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} ) __A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) self.assertIsInstance(encoding.words, _lowerCamelCase ) self.assertIsInstance(encoding.boxes, _lowerCamelCase ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0], 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' # with apply_OCR = True __A = LayoutLMvaImageProcessor() from datasets import load_dataset __A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' ) __A = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) ) 
self.assertEqual(len(encoding.words ), len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', 
'''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 
2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, _lowerCamelCase ) self.assertListEqual(encoding.boxes, _lowerCamelCase ) # with apply_OCR = False __A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
266
1
"""Dataset version helpers: a comparable, hashable MAJOR.MINOR.PATCH dataclass."""

# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version, parsed from a "x.y.z" string.

    Comparison accepts either another Version or a version string; ordering is
    provided by `total_ordering` from `__eq__` and `__lt__`.
    """

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        # Parse "x.y.z" into the three int components.
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        """Coerce a string operand to Version; reject anything else."""
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            # Non-comparable operands (or malformed strings) are simply unequal.
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        """Build a Version from a dict, ignoring keys that are not dataclass fields."""
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) int tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Render a (major, minor, patch) tuple back to "x.y.z"."""
    return ".".join(str(v) for v in version_tuple)
266
"""Hide/show the terminal cursor on Windows (Win32 console API) and POSIX (ANSI escapes)."""

import ctypes
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # `_fields_` is the attribute name ctypes requires for the struct layout;
    # mirrors the Win32 CONSOLE_CURSOR_INFO structure.
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    """Make the terminal cursor invisible."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # -11 == STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    """Make the terminal cursor visible again."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # -11 == STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the cursor and guarantees it is restored on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
266
1
"""CLI script converting a Funnel TensorFlow checkpoint into a PyTorch state dict."""

import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    """Convert a TF Funnel checkpoint to PyTorch and save it.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: destination path for the PyTorch state dict.
        base_model: if truthy, build the encoder-only FunnelBaseModel (no decoder).
    """
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
266
"""Pure-Python SHA-256 implementation (FIPS 180-4) with a hashlib cross-check and a small CLI."""

import argparse
import struct
import unittest


class SHA256:
    """Compute the SHA-256 digest of `data`; the hex digest is exposed as `self.hash`."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values (FIPS 180-4 initial H values)
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]

        # Initialize round constants (first 32 bits of the fractional parts of
        # the cube roots of the first 64 primes)
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the message to a multiple of 64 bytes and append the 64-bit bit-length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over each 64-byte block and set `self.hash`."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers (message schedule is 64 words)
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (words[index - 16] + s0 + words[index - 7] + s1) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (h + s1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value by `rotations` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Cross-check the pure-Python digest against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Hash a string or a file given on the command line and print the digest."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
266
1
"""Solve ``shear stress = tangential force / area`` for whichever quantity is unknown."""
from __future__ import annotations


def lowerCAmelCase(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
    """Compute the missing one of (stress, tangential_force, area).

    Exactly one argument must be 0 — that is treated as the unknown and solved
    for from the other two via ``stress = tangential_force / area``.

    Returns:
        A ``(name, value)`` tuple naming the solved quantity.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        # stress unknown: stress = F / A
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        # force unknown: F = stress * A
        return (
            "tangential_force",
            stress * area,
        )
    else:
        # area unknown: A = F / stress
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
266
"""``datasets`` metric wrapping the official MAUVE implementation (mauve-text)."""
import faiss  # noqa: F401  # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401  # Here to have a nice missing dependency error message early on
import requests  # noqa: F401  # Here to have a nice missing dependency error message early on
import sklearn  # noqa: F401  # Here to have a nice missing dependency error message early on
import tqdm  # noqa: F401  # Here to have a nice missing dependency error message early on
from mauve import compute_mauve  # From: mauve-text

import datasets


_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n  title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n  author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n  booktitle = {NeurIPS},\n  year = {2021}\n}\n\n'

_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'

_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n    predictions: list of generated text to score. Each predictions\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\nOptional Args:\n    num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n    kmeans_max_iter: maximum number of k-means iterations. Default 500\n    featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n    max_text_length: maximum number of tokens to consider. Default 1024\n    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n    mauve_scaling_factor: "c" from the paper. Default 5.\n    verbose: If True (default), print running time updates\n    seed: random seed to initialize k-means cluster assignments.\nReturns:\n    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n    q_hist: same as above, but with q_text.\nExamples:\n\n    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n    >>> import datasets\n    >>> mauve = datasets.load_metric(\'mauve\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n    >>> print(out.mauve) # doctest: +SKIP\n    1.0\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    """Thin ``datasets.Metric`` wrapper delegating to ``mauve.compute_mauve``.

    ``datasets.Metric`` requires the ``_info`` / ``_compute`` method names, and
    each ``_compute`` keyword is forwarded verbatim to ``compute_mauve``.
    """

    def _info(self):
        # Metric metadata: two parallel lists of strings (predictions / references).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Compute MAUVE between *predictions* (P) and *references* (Q).

        Optional pre-computed features/tokens skip the featurization step; all
        remaining knobs are documented in ``_KWARGS_DESCRIPTION``.
        """
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out


# Backward-compatible alias for the previous (obfuscated) class name.
snake_case = Mauve
266
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : int=3, _lowerCamelCase : Optional[Any]=18, _lowerCamelCase : Any=30, _lowerCamelCase : str=4_00, _lowerCamelCase : int=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=True, ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = apply_ocr def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = LayoutLMvaImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''apply_ocr''' ) ) def 
_SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} ) __A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) self.assertIsInstance(encoding.words, _lowerCamelCase ) self.assertIsInstance(encoding.boxes, _lowerCamelCase ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0], 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' # with apply_OCR = True __A = LayoutLMvaImageProcessor() from datasets import load_dataset __A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' ) __A = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) ) 
self.assertEqual(len(encoding.words ), len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', 
'''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 
2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, _lowerCamelCase ) self.assertListEqual(encoding.boxes, _lowerCamelCase ) # with apply_OCR = False __A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
266
"""PyTest suite for the ``digital_image_processing`` package.

Each test exercises one module against the bundled Lena sample images; the
assertions are smoke checks (output arrays are non-trivial), not pixel-exact.
"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared fixtures: the small Lena image and its grayscale version.
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # Laplace kernel (diagonals weighted 0.25).
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center
    )
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
266
1
"""Convert a decimal number (or numeric string) to a fraction in lowest terms."""


def decimal_to_fraction(decimal):
    """Return ``(numerator, denominator)`` for *decimal* reduced to lowest terms.

    Args:
        decimal: an int, float, or numeric string (e.g. ``"6.25"``).

    Raises:
        ValueError: if *decimal* cannot be parsed as a number.
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        # Already an integer: n / 1.
        return int(decimal), 1
    else:
        # Scale by 10^d so the value becomes an integer ratio, then reduce by
        # the GCD computed via the Euclidean algorithm.
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


# Backward-compatible alias for the previous (obfuscated) function name.
lowerCAmelCase = decimal_to_fraction

if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
266
"""Tests for the Audio Spectrogram Transformer (AST) feature extractor."""
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random (rows, cols) nested list of floats in [0, scale)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    """Builds feature-extractor configs and random speech inputs for the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive input lengths so the batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        # NOTE: must be stored on self so the mixin's tests can reach it.
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # assumes the original dtypes were float64 in / float32 out — the
        # obfuscated source collapsed both to one name; TODO confirm upstream.
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
266
1
"""simple docstring""" import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process lowercase_ = logging.getLogger(__name__) lowercase_ = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class snake_case : '''simple docstring''' A_ : Optional[str] = field( default=_lowerCAmelCase , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) A_ : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_lowerCAmelCase )} , ) A_ : Optional[str] = field( default=_lowerCAmelCase , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) A_ : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A_ : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) A_ : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) A_ : bool = field( default=_lowerCAmelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) A_ : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) A_ : bool = field( default=_lowerCAmelCase , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) } , ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( '''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' ) @dataclass class snake_case : '''simple docstring''' A_ : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) A_ : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) A_ : Optional[str] = field(default=_lowerCAmelCase , metadata={"help": "The input training data file (a text file)."} ) A_ : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) A_ : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "An optional input train ref data file for whole word masking in Chinese."} , ) A_ : Optional[str] = field( default=_lowerCAmelCase , metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."} , ) A_ : bool = field( default=_lowerCAmelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) A_ : Optional[int] = field( default=5 , metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" } , ) A_ : Optional[int] = field( default=_lowerCAmelCase , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." 
) } , ) A_ : Optional[int] = field( default=_lowerCAmelCase , metadata={"help": "The number of processes to use for the preprocessing."} , ) A_ : float = field( default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) A_ : bool = field( default=_lowerCAmelCase , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' if self.train_file is not None: __A = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: __A = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = [json.loads(__UpperCamelCase ) for line in f.read().splitlines() if (len(__UpperCamelCase ) > 0 and not line.isspace())] assert len(__UpperCamelCase ) == len(__UpperCamelCase ) __A = {c: dataset[c] for c in dataset.column_names} __A = refs return Dataset.from_dict(__UpperCamelCase ) def lowerCAmelCase ( ): """simple docstring""" __A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __A , __A , __A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __A , __A , __A = parser.parse_args_into_dataclasses() # Detecting last checkpoint. 
__A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , __UpperCamelCase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). 
# # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __A = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): __A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'train[:{data_args.validation_split_percentage}%]' , ) __A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'train[{data_args.validation_split_percentage}%:]' , ) else: __A = {} if data_args.train_file is not None: __A = data_args.train_file if data_args.validation_file is not None: __A = data_args.validation_file __A = data_args.train_file.split('''.''' )[-1] if extension == "txt": __A = '''text''' __A = load_dataset(__UpperCamelCase , data_files=__UpperCamelCase ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__A = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name: __A = AutoConfig.from_pretrained(model_args.config_name , **__UpperCamelCase ) elif model_args.model_name_or_path: __A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase ) else: __A = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(f'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(f'New config: {config}' ) __A = { '''cache_dir''': model_args.cache_dir, '''use_fast''': model_args.use_fast_tokenizer, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.tokenizer_name: __A = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__UpperCamelCase ) elif model_args.model_name_or_path: __A = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) if model_args.model_name_or_path: __A = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __A = AutoModelForMaskedLM.from_config(__UpperCamelCase ) model.resize_token_embeddings(len(__UpperCamelCase ) ) # Preprocessing the datasets. # First we tokenize all the texts. 
if training_args.do_train: __A = datasets['''train'''].column_names else: __A = datasets['''validation'''].column_names __A = '''text''' if '''text''' in column_names else column_names[0] __A = '''max_length''' if data_args.pad_to_max_length else False def tokenize_function(__UpperCamelCase ): # Remove empty lines __A = [line for line in examples['''text'''] if len(__UpperCamelCase ) > 0 and not line.isspace()] return tokenizer(examples['''text'''] , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=data_args.max_seq_length ) __A = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: __A = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: __A = add_chinese_references( tokenized_datasets['''validation'''] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer __A = data_args.train_ref_file or data_args.validation_ref_file if has_ref: __A = False # Data collator # This one will take care of randomly masking the tokens. 
__A = DataCollatorForWholeWordMask(tokenizer=__UpperCamelCase , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer __A = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: if last_checkpoint is not None: __A = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): __A = model_args.model_name_or_path else: __A = None __A = trainer.train(resume_from_checkpoint=__UpperCamelCase ) trainer.save_model() # Saves the tokenizer too for easy upload __A = os.path.join(training_args.output_dir , '''train_results.txt''' ) if trainer.is_world_process_zero(): with open(__UpperCamelCase , '''w''' ) as writer: logger.info('''***** Train results *****''' ) for key, value in sorted(train_result.metrics.items() ): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # Evaluation __A = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __A = trainer.evaluate() __A = math.exp(eval_output['''eval_loss'''] ) __A = perplexity __A = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' ) if trainer.is_world_process_zero(): with open(__UpperCamelCase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in sorted(results.items() ): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) return results def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" main() if __name__ == "__main__": main()
266
"""Solve a system of n linear equations, given as n rows of n+1
coefficients, via recursive Gaussian elimination with back substitution.

NOTE(review): the extracted source named both top-level functions
``lowerCAmelCase`` while the recursion calls ``simplify`` and the
``__main__`` block calls ``solve_simultaneous`` on an undefined ``eq`` —
guaranteed NameErrors.  The definitions below restore the names the call
sites expect; the algorithm itself is unchanged.
"""


def simplify(current_set: list[list]) -> list[list]:
    """Perform one elimination step and recurse on the reduced system.

    Each row is normalised by its leading coefficient, the first row is
    subtracted from every other row to cancel the leading term, and the
    function recurses on the sub-system (first row and first column
    removed) until each row holds two unknowns plus the constant term.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> 'unit' rows.
    # (Rows are shared with the caller's list, so they mutate in place.)
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract the first row from every other row to cancel its lead term.
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # A leading 0 is already in the form we want, so preserve the row.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create the next recursion iteration set: drop the (now zero) first
    # column of the remaining rows, solve the smaller system, then stitch
    # the removed column values and the pivot row back on.
    if len(final_set[0]) != 3:
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, final_set[0])
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Return the unknowns of the system, in input order, each as a float
    rounded to 5 decimal places.

    :raises IndexError: if ``equations`` is empty or any row does not
        contain exactly ``len(equations) + 1`` entries.
    :raises ValueError: if a coefficient is not an int/float, or (when
        zeros are present) no equation is free of zero coefficients to
        serve as the pivot row.

    >>> solve_simultaneous([[4, 2]])
    [0.5]
    >>> solve_simultaneous([[1, 1, 3], [1, -1, 1]])
    [2.0, 1.0]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        # Single equation: a*x = b  -->  x = b / a.
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        # Move one zero-free equation to the front to act as the pivot.
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the fully reduced (last) row upwards.
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            # Bottom row: one unknown left; guard against a zero pivot.
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
266
1
"""simple docstring""" import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = torch.exp(__UpperCamelCase ) __A = torch.sum(__UpperCamelCase , dim=1 ) # sum of exp(x_i) __A = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(__UpperCamelCase ) - B / A class snake_case ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' super().__init__() __A = config.output_attentions __A = config.output_hidden_states __A = nn.ModuleList([BertLayer(_lowerCamelCase ) for _ in range(config.num_hidden_layers )] ) __A = nn.ModuleList([BertHighway(_lowerCamelCase ) for _ in range(config.num_hidden_layers )] ) __A = [-1 for _ in range(config.num_hidden_layers )] def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[Any] ): '''simple docstring''' if (type(_lowerCamelCase ) is float) or (type(_lowerCamelCase ) is int): for i in range(len(self.early_exit_entropy ) ): __A = x else: __A = x def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : Optional[Any] ): '''simple docstring''' __A = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Optional[int], _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Optional[int]=None, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[int]=None, ): '''simple docstring''' __A = () __A = () __A = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: __A = all_hidden_states + (hidden_states,) __A = layer_module( 
_lowerCamelCase, _lowerCamelCase, head_mask[i], _lowerCamelCase, _lowerCamelCase ) __A = layer_outputs[0] if self.output_attentions: __A = all_attentions + (layer_outputs[1],) __A = (hidden_states,) if self.output_hidden_states: __A = current_outputs + (all_hidden_states,) if self.output_attentions: __A = current_outputs + (all_attentions,) __A = self.highway[i](_lowerCamelCase ) # logits, pooled_output if not self.training: __A = highway_exit[0] __A = entropy(_lowerCamelCase ) __A = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy __A = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: __A = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(_lowerCamelCase, i + 1 ) else: __A = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: __A = all_hidden_states + (hidden_states,) __A = (hidden_states,) if self.output_hidden_states: __A = outputs + (all_hidden_states,) if self.output_attentions: __A = outputs + (all_attentions,) __A = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( "The Bert Model transformer with early exiting (DeeBERT). 
" , _lowerCAmelCase , ) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Tuple, _lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(_lowerCamelCase ) __A = config __A = BertEmbeddings(_lowerCamelCase ) __A = DeeBertEncoder(_lowerCamelCase ) __A = BertPooler(_lowerCamelCase ) self.init_weights() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' self.encoder.init_highway_pooler(self.pooler ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' return self.embeddings.word_embeddings def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Tuple ): '''simple docstring''' __A = value def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : Optional[Any] ): '''simple docstring''' for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(_lowerCamelCase ) @add_start_docstrings_to_model_forward(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str]=None, _lowerCamelCase : List[str]=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : str=None, _lowerCamelCase : int=None, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[int]=None, _lowerCamelCase : Optional[Any]=None, ): '''simple docstring''' if input_ids is not None and inputs_embeds is not None: raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' ) elif input_ids is not None: __A = input_ids.size() elif inputs_embeds is not None: __A = inputs_embeds.size()[:-1] else: raise ValueError('''You have to specify either input_ids or inputs_embeds''' ) __A = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __A = torch.ones(_lowerCamelCase, device=_lowerCamelCase ) if encoder_attention_mask is None: __A = torch.ones(_lowerCamelCase, device=_lowerCamelCase ) if token_type_ids is None: __A = torch.zeros(_lowerCamelCase, dtype=torch.long, device=_lowerCamelCase ) # We 
can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. __A = self.get_extended_attention_mask(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: __A = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: __A = encoder_attention_mask[:, None, None, :] __A = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility __A = (1.0 - encoder_extended_attention_mask) * -1_00_00.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __A = self.get_head_mask(_lowerCamelCase, self.config.num_hidden_layers ) __A = self.embeddings( input_ids=_lowerCamelCase, position_ids=_lowerCamelCase, token_type_ids=_lowerCamelCase, inputs_embeds=_lowerCamelCase ) __A = self.encoder( _lowerCamelCase, attention_mask=_lowerCamelCase, head_mask=_lowerCamelCase, encoder_hidden_states=_lowerCamelCase, encoder_attention_mask=_lowerCamelCase, ) __A = encoder_outputs[0] __A = self.pooler(_lowerCamelCase ) __A = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : List[str], _lowerCamelCase : Tuple ): '''simple docstring''' __A = message __A = exit_layer # start from 1! 
class snake_case ( nn.Module ): '''simple docstring''' def __init__( self : str, _lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__() __A = BertPooler(_lowerCamelCase ) __A = nn.Dropout(config.hidden_dropout_prob ) __A = nn.Linear(config.hidden_size, config.num_labels ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : str ): '''simple docstring''' # Pooler __A = encoder_outputs[0] __A = self.pooler(_lowerCamelCase ) # "return" pooler_output # BertModel __A = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification __A = bmodel_output[1] __A = self.dropout(_lowerCamelCase ) __A = self.classifier(_lowerCamelCase ) return logits, pooled_output @add_start_docstrings( "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , _lowerCAmelCase , ) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Union[str, Any], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' super().__init__(_lowerCamelCase ) __A = config.num_labels __A = config.num_hidden_layers __A = DeeBertModel(_lowerCamelCase ) __A = nn.Dropout(config.hidden_dropout_prob ) __A = nn.Linear(config.hidden_size, self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Optional[int]=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : List[Any]=None, _lowerCamelCase : List[Any]=None, _lowerCamelCase : List[Any]=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Optional[Any]=-1, _lowerCamelCase : int=False, ): '''simple docstring''' __A = self.num_layers try: __A = self.bert( _lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, position_ids=_lowerCamelCase, head_mask=_lowerCamelCase, inputs_embeds=_lowerCamelCase, ) # sequence_output, 
pooled_output, (hidden_states), (attentions), highway exits __A = outputs[1] __A = self.dropout(_lowerCamelCase ) __A = self.classifier(_lowerCamelCase ) __A = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __A = e.message __A = e.exit_layer __A = outputs[0] if not self.training: __A = entropy(_lowerCamelCase ) __A = [] __A = [] if labels is not None: if self.num_labels == 1: # We are doing regression __A = MSELoss() __A = loss_fct(logits.view(-1 ), labels.view(-1 ) ) else: __A = CrossEntropyLoss() __A = loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) ) # work with highway exits __A = [] for highway_exit in outputs[-1]: __A = highway_exit[0] if not self.training: highway_logits_all.append(_lowerCamelCase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __A = MSELoss() __A = loss_fct(highway_logits.view(-1 ), labels.view(-1 ) ) else: __A = CrossEntropyLoss() __A = loss_fct(highway_logits.view(-1, self.num_labels ), labels.view(-1 ) ) highway_losses.append(_lowerCamelCase ) if train_highway: __A = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __A = (loss,) + outputs if not self.training: __A = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __A = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
266
"""Evaluate a postfix-notation (reverse Polish) expression of integers.

NOTE(review): the extracted source bound both operands to the same
throwaway name (``__A , __A = stack.pop(), stack.pop()``) and then read
``a`` and ``b``, and the parameter was renamed ``__UpperCamelCase`` while
the body reads ``postfix_notation`` — both guaranteed NameErrors.  The
operand binding ``b, a = pop(), pop()`` below makes ``a`` the left-hand
operand, which is the only assignment consistent with ``a - b``/``a // b``.
"""

from __future__ import annotations

from typing import Any


def lowerCAmelCase(postfix_notation: list) -> int:
    """Evaluate ``postfix_notation`` and return the integer result.

    Supported operators are ``+``, ``-``, ``*`` and ``/``; division
    truncates toward zero (C-style), not Python floor division.
    An empty expression evaluates to 0.

    >>> lowerCAmelCase(["2", "1", "+", "3", "*"])
    9
    >>> lowerCAmelCase(["4", "13", "5", "/", "+"])
    6
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            # Top of stack is the right-hand operand, next is the left.
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # "/": emulate truncation toward zero with floor division.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
266
1
"""Convert a T5X (Flax) checkpoint into a PyTorch checkpoint.

NOTE(review): the extracted source defined every helper as
``lowerCAmelCase`` while the call sites referenced ``tax_attention_lookup``
etc. (NameError at runtime), and read ``args.tax_checkpoint_path`` for a
flag registered as ``--t5x_checkpoint_path`` (AttributeError).  Both
inconsistencies are repaired below without changing the CLI.
"""
import argparse
import collections

import torch
from flax import traverse_util
from tax import checkpoints

from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) kernels of attention block ``i`` under ``prefix``."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) MLP kernels of block ``i``.

    ``wi`` is a (wi_0, wi_1) pair for v1.1-style gated-GeLU checkpoints
    (``split_mlp_wi=True``) and a single kernel otherwise.
    """
    if split_mlp_wi:
        wi_a = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_b = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_a, wi_b)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale of block ``i`` / ``layer_name``."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Flatten a T5X variable tree and rename every parameter to its
    Transformers T5 state-dict key.

    NOTE(review): the target key names below were destroyed by the
    identifier obfuscation in the extracted source; they are reconstructed
    from the upstream T5X -> Transformers conversion mapping — verify
    against the released conversion script before shipping.
    """
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi.
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead).
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Turn the converted numpy arrays into a torch state dict, filling in
    the embedding/lm_head entries that share the ``shared.weight`` tensor."""
    state_dict = collections.OrderedDict(
        [(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]
    )

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:
            # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Load the T5X checkpoint at ``tax_checkpoint_path`` into ``model``."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False
):
    """Build a T5 model from ``config_file``, fill it from the T5X
    checkpoint, and save it as a PyTorch checkpoint at ``pytorch_dump_path``."""
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)

    # Load weights from the T5X checkpoint.
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
266
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple, _lowerCamelCase : List[str]=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : int=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : int=99, _lowerCamelCase : Optional[int]=32, _lowerCamelCase : Tuple=5, _lowerCamelCase : Tuple=4, _lowerCamelCase : str=37, _lowerCamelCase : Union[str, Any]="gelu", _lowerCamelCase : int=0.1, _lowerCamelCase : List[Any]=0.1, _lowerCamelCase : Dict=5_12, _lowerCamelCase : List[Any]=16, _lowerCamelCase : Any=2, _lowerCamelCase : Any=0.02, _lowerCamelCase : Dict=4, ): '''simple docstring''' __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_attention_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_choices def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __A = None if self.use_attention_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A 
= None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) __A = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self.prepare_config_and_inputs() __A , __A , __A , __A = config_and_inputs __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Dict = True A_ : Tuple = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = FlaxRoFormerModelTester(self ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: __A = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''', from_pt=_lowerCamelCase ) __A = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCamelCase ) @require_flax class snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __A = jnp.array([[0, 1, 2, 3, 4, 5]] ) 
__A = model(_lowerCamelCase )[0] __A = 5_00_00 __A = (1, 6, vocab_size) self.assertEqual(output.shape, _lowerCamelCase ) __A = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
266
1
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset from utils import logger class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : Optional[int], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' __A = params __A = np.array(_lowerCamelCase ) __A = np.array([len(_lowerCamelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Union[str, Any], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self : Any ): '''simple docstring''' return len(self.lengths ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = self.params.max_model_input_size __A = self.lengths > max_len logger.info(f'Splitting {sum(_lowerCamelCase )} too long sequences.' 
) def divide_chunks(_lowerCamelCase : Dict, _lowerCamelCase : Optional[Any] ): return [l[i : i + n] for i in range(0, len(_lowerCamelCase ), _lowerCamelCase )] __A = [] __A = [] if self.params.mlm: __A , __A = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token'''] else: __A , __A = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token'''] for seq_, len_ in zip(self.token_ids, self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __A = [] for sub_s in divide_chunks(seq_, max_len - 2 ): if sub_s[0] != cls_id: __A = np.insert(_lowerCamelCase, 0, _lowerCamelCase ) if sub_s[-1] != sep_id: __A = np.insert(_lowerCamelCase, len(_lowerCamelCase ), _lowerCamelCase ) assert len(_lowerCamelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(_lowerCamelCase ) new_tok_ids.extend(_lowerCamelCase ) new_lengths.extend([len(_lowerCamelCase ) for l in sub_seqs] ) __A = np.array(_lowerCamelCase ) __A = np.array(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = len(self ) __A = self.lengths > 11 __A = self.token_ids[indices] __A = self.lengths[indices] __A = len(self ) logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: __A = self.params.special_tok_ids['''unk_token'''] __A = len(self ) __A = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __A = (unk_occs / self.lengths) < 0.5 __A = self.token_ids[indices] __A = self.lengths[indices] __A = len(self ) logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' 
) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' if not self.params.is_master: return logger.info(f'{len(self )} sequences' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : Optional[int] ): '''simple docstring''' __A = [t[0] for t in batch] __A = [t[1] for t in batch] assert len(_lowerCamelCase ) == len(_lowerCamelCase ) # Max for paddings __A = max(_lowerCamelCase ) # Pad token ids if self.params.mlm: __A = self.params.special_tok_ids['''pad_token'''] else: __A = self.params.special_tok_ids['''unk_token'''] __A = [list(t.astype(_lowerCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(_lowerCamelCase )) for t in token_ids] assert len(tk_ ) == len(_lowerCamelCase ) assert all(len(_lowerCamelCase ) == max_seq_len_ for t in tk_ ) __A = torch.tensor(tk_ ) # (bs, max_seq_len_) __A = torch.tensor(_lowerCamelCase ) # (bs) return tk_t, lg_t
266
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def lowerCAmelCase ( __UpperCamelCase = 1_0_0_0_0_0_0 , __UpperCamelCase = 1_0 ): """simple docstring""" __A = defaultdict(__UpperCamelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __A = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __A = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__UpperCamelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 1_0 ) if __name__ == "__main__": print(F'''{solution() = }''')
266
1
"""simple docstring""" # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = [False] * len(__UpperCamelCase ) __A = [-1] * len(__UpperCamelCase ) def dfs(__UpperCamelCase , __UpperCamelCase ): __A = True __A = c for u in graph[v]: if not visited[u]: dfs(__UpperCamelCase , 1 - c ) for i in range(len(__UpperCamelCase ) ): if not visited[i]: dfs(__UpperCamelCase , 0 ) for i in range(len(__UpperCamelCase ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph lowercase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
266
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : Optional[int]=2, _lowerCamelCase : Optional[int]=3, _lowerCamelCase : int=64, _lowerCamelCase : List[str]=None ): '''simple docstring''' __A = np.random.default_rng(_lowerCamelCase ) __A = length __A = rng.normal(size=(length,) ).astype(np.floataa ) __A = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa ) def __len__( self : str ): '''simple docstring''' return self.length def __getitem__( self : Dict, _lowerCamelCase : Optional[int] ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple=0, _lowerCamelCase : Any=0, _lowerCamelCase : Optional[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[Any]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a[0] + self.b[0] class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : str, _lowerCamelCase : Optional[Any]=0, _lowerCamelCase : Any=0, _lowerCamelCase : List[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}' ) __A = False return x * self.a + self.b def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1_6 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __A = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __A = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} __A = load_dataset('''csv''' , data_files=__UpperCamelCase ) __A = datasets['''train'''].unique('''label''' ) __A = {v: i for i, v in enumerate(__UpperCamelCase )} def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) if "label" in examples: __A = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __A = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __A = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=2 ) __A = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
266
1
"""simple docstring""" from __future__ import annotations from collections.abc import MutableSequence class snake_case : '''simple docstring''' def __init__( self : Dict, _lowerCamelCase : int, _lowerCamelCase : MutableSequence[float] ): '''simple docstring''' if len(_lowerCamelCase ) != degree + 1: raise ValueError( '''The number of coefficients should be equal to the degree + 1.''' ) __A = list(_lowerCamelCase ) __A = degree def __add__( self : List[Any], _lowerCamelCase : Polynomial ): '''simple docstring''' if self.degree > polynomial_a.degree: __A = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree, _lowerCamelCase ) else: __A = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree, _lowerCamelCase ) def __sub__( self : Optional[int], _lowerCamelCase : Polynomial ): '''simple docstring''' return self + polynomial_a * Polynomial(0, [-1] ) def __neg__( self : Optional[int] ): '''simple docstring''' return Polynomial(self.degree, [-c for c in self.coefficients] ) def __mul__( self : List[Any], _lowerCamelCase : Polynomial ): '''simple docstring''' __A = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int | float ): '''simple docstring''' __A = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self : Tuple ): '''simple docstring''' __A = '''''' for i in range(self.degree, -1, -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += 
str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_lowerCamelCase ) return polynomial def __repr__( self : List[str] ): '''simple docstring''' return self.__str__() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = [0] * self.degree for i in range(self.degree ): __A = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int | float = 0 ): '''simple docstring''' __A = [0] * (self.degree + 2) __A = constant for i in range(self.degree + 1 ): __A = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1, _lowerCamelCase ) def __eq__( self : Optional[int], _lowerCamelCase : object ): '''simple docstring''' if not isinstance(_lowerCamelCase, _lowerCamelCase ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self : List[Any], _lowerCamelCase : object ): '''simple docstring''' return not self.__eq__(_lowerCamelCase )
266
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. 
According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... 
\'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ), } ), ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[List[List[str]]], _lowerCamelCase : List[List[str]], _lowerCamelCase : int = 1, _lowerCamelCase : int = 4, ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCamelCase, hypotheses=_lowerCamelCase, min_len=_lowerCamelCase, max_len=_lowerCamelCase ) }
266
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { 'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST', 'PegasusXForConditionalGeneration', 'PegasusXModel', 'PegasusXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
266
"""simple docstring""" class snake_case : '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : list[int] ): '''simple docstring''' __A = len(_lowerCamelCase ) __A = [0] * len_array if len_array > 0: __A = array[0] for i in range(1, _lowerCamelCase ): __A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int ): '''simple docstring''' __A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(_lowerCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
266
1
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple, _lowerCamelCase : List[str]=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : int=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : int=99, _lowerCamelCase : Optional[int]=32, _lowerCamelCase : Tuple=5, _lowerCamelCase : Tuple=4, _lowerCamelCase : str=37, _lowerCamelCase : Union[str, Any]="gelu", _lowerCamelCase : int=0.1, _lowerCamelCase : List[Any]=0.1, _lowerCamelCase : Dict=5_12, _lowerCamelCase : List[Any]=16, _lowerCamelCase : Any=2, _lowerCamelCase : Any=0.02, _lowerCamelCase : Dict=4, ): '''simple docstring''' __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_attention_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_choices def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __A = None if self.use_attention_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A 
= None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) __A = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self.prepare_config_and_inputs() __A , __A , __A , __A = config_and_inputs __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Dict = True A_ : Tuple = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = FlaxRoFormerModelTester(self ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: __A = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''', from_pt=_lowerCamelCase ) __A = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCamelCase ) @require_flax class snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __A = jnp.array([[0, 1, 2, 3, 4, 5]] ) 
__A = model(_lowerCamelCase )[0] __A = 5_00_00 __A = (1, 6, vocab_size) self.assertEqual(output.shape, _lowerCamelCase ) __A = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
266
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowercase_ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowercase_ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase ( ): """simple docstring""" __A = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __A = bs[:] __A = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCamelCase ) cs.append(2**8 + n ) n += 1 __A = [chr(__UpperCamelCase ) for n in cs] return dict(zip(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = set() __A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __A = char return pairs class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = VOCAB_FILES_NAMES A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : Dict="replace", _lowerCamelCase : 
Any="<s>", _lowerCamelCase : Optional[int]="</s>", _lowerCamelCase : Dict="</s>", _lowerCamelCase : List[Any]="<s>", _lowerCamelCase : List[str]="<unk>", _lowerCamelCase : str="<pad>", _lowerCamelCase : Any="<mask>", _lowerCamelCase : Any=False, **_lowerCamelCase : Tuple, ): '''simple docstring''' __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else bos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else eos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else sep_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else cls_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token super().__init__( errors=_lowerCamelCase, bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, add_prefix_space=_lowerCamelCase, **_lowerCamelCase, ) with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) __A = {v: k for k, v in self.encoder.items()} __A = errors # how to handle errors in decoding __A = bytes_to_unicode() __A = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCamelCase, encoding='''utf-8''' ) as merges_handle: __A = merges_handle.read().split('''\n''' )[1:-1] __A = [tuple(merge.split() ) for merge in bpe_merges] __A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) ) __A = {} __A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder, **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] __A = tuple(_lowerCamelCase ) __A = get_pairs(_lowerCamelCase ) if not pairs: return token while True: __A = min(_lowerCamelCase, key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __A , 
__A = bigram __A = [] __A = 0 while i < len(_lowerCamelCase ): try: __A = word.index(_lowerCamelCase, _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __A = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __A = tuple(_lowerCamelCase ) __A = new_word if len(_lowerCamelCase ) == 1: break else: __A = get_pairs(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = word return word def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Dict ): '''simple docstring''' __A = [] for token in re.findall(self.pat, _lowerCamelCase ): __A = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) ) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' return self.encoder.get(_lowerCamelCase, self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ): '''simple docstring''' return self.decoder.get(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = ''''''.join(_lowerCamelCase ) __A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors ) return text def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join( _lowerCamelCase, 
(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowerCamelCase, ensure_ascii=_lowerCamelCase ) + '''\n''' ) __A = 0 with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ''' Please check that the tokenizer is not corrupted!''' ) __A = token_index writer.write(''' '''.join(_lowerCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str]=False, **_lowerCamelCase : List[Any] ): '''simple docstring''' __A = kwargs.pop('''add_prefix_space''', self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()): __A = ''' ''' + text 
return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ): '''simple docstring''' __A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __A = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
266
1
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch lowercase_ = random.Random() def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None ): """simple docstring""" if rng is None: __A = global_rng __A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict, _lowerCamelCase : Union[str, Any], _lowerCamelCase : str=7, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Tuple=20_00, _lowerCamelCase : Any=10, _lowerCamelCase : Union[str, Any]=1_60, _lowerCamelCase : Tuple=8, _lowerCamelCase : Optional[int]=0.0, _lowerCamelCase : Optional[Any]=40_00, _lowerCamelCase : List[Any]=False, _lowerCamelCase : Any=True, ): '''simple docstring''' __A = parent __A = batch_size __A = min_seq_length __A = max_seq_length __A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __A = padding_value __A = sampling_rate __A = return_attention_mask __A = do_normalize __A = feature_size __A = chunk_length __A = hop_length def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": 
self.return_attention_mask, "do_normalize": self.do_normalize, } def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int=False, _lowerCamelCase : Union[str, Any]=False ): '''simple docstring''' def _flatten(_lowerCamelCase : Any ): return list(itertools.chain(*_lowerCamelCase ) ) if equal_length: __A = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __A = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: __A = [np.asarray(_lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = WhisperFeatureExtractor if is_speech_available() else None def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = WhisperFeatureExtractionTester(self ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __A = feat_extract_first.save_pretrained(_lowerCamelCase )[0] check_json_file_has_correct_format(_lowerCamelCase ) __A = self.feature_extraction_class.from_pretrained(_lowerCamelCase ) __A = feat_extract_first.to_dict() __A = feat_extract_second.to_dict() __A = feat_extract_first.mel_filters __A = feat_extract_second.mel_filters self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase ) ) self.assertEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __A = os.path.join(_lowerCamelCase, '''feat_extract.json''' ) feat_extract_first.to_json_file(_lowerCamelCase ) __A = self.feature_extraction_class.from_json_file(_lowerCamelCase ) __A = 
feat_extract_first.to_dict() __A = feat_extract_second.to_dict() __A = feat_extract_first.mel_filters __A = feat_extract_second.mel_filters self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase ) ) self.assertEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' # Tests that all call wrap to encode_plus and batch_encode_plus __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __A = [floats_list((1, x) )[0] for x in range(8_00, 14_00, 2_00 )] __A = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] # Test feature size __A = feature_extractor(_lowerCamelCase, padding='''max_length''', return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input __A = feature_extractor(speech_inputs[0], return_tensors='''np''' ).input_features __A = feature_extractor(np_speech_inputs[0], return_tensors='''np''' ).input_features self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test batched __A = feature_extractor(_lowerCamelCase, return_tensors='''np''' ).input_features __A = feature_extractor(_lowerCamelCase, return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __A = np.asarray(_lowerCamelCase ) __A = feature_extractor(_lowerCamelCase, return_tensors='''np''' ).input_features __A = feature_extractor(_lowerCamelCase, return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test truncation required __A = [floats_list((1, x) )[0] for x in range(2_00, (feature_extractor.n_samples + 5_00), 2_00 )] __A = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] __A = [x[: feature_extractor.n_samples] for x in speech_inputs] __A = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs_truncated] __A = feature_extractor(_lowerCamelCase, return_tensors='''np''' ).input_features __A = feature_extractor(_lowerCamelCase, return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' import torch __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __A = np.random.rand(1_00, 32 ).astype(np.floataa ) __A = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __A = feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __A = feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech __A = ds.sort('''id''' ).select(range(_lowerCamelCase ) 
)[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' # fmt: off __A = torch.tensor( [ 0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51, 0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78, 0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54, -0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54 ] ) # fmt: on __A = self._load_datasamples(1 ) __A = WhisperFeatureExtractor() __A = feature_extractor(_lowerCamelCase, return_tensors='''pt''' ).input_features self.assertEqual(input_features.shape, (1, 80, 30_00) ) self.assertTrue(torch.allclose(input_features[0, 0, :30], _lowerCamelCase, atol=1e-4 ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __A = self._load_datasamples(1 )[0] __A = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue __A = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=_lowerCamelCase )[0] self.assertTrue(np.all(np.mean(_lowerCamelCase ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(_lowerCamelCase ) - 1 ) < 1e-3 ) )
266
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging lowercase_ = ( 'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py' ) lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCAmelCase ( ): """simple docstring""" __A = '''https://pypi.org/pypi/diffusers/json''' __A = json.loads(request.urlopen(__UpperCamelCase ).read() )['''releases'''].keys() return sorted(__UpperCamelCase , key=lambda __UpperCamelCase : version.Version(__UpperCamelCase ) ) def lowerCAmelCase ( ): """simple docstring""" if HF_MODULES_CACHE in sys.path: return sys.path.append(__UpperCamelCase ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = Path(__UpperCamelCase ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" init_hf_modules() __A = Path(__UpperCamelCase ) / name # If the parent module does not exist yet, recursively create it. 
if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import .xxx` __A = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = False __A = [module_file] __A = [] # Let's recurse through all relative imports while not no_change: __A = [] for f in files_to_check: new_imports.extend(get_relative_imports(__UpperCamelCase ) ) __A = Path(__UpperCamelCase ).parent __A = [str(module_path / m ) for m in new_imports] __A = [f for f in new_import_files if f not in all_relative_imports] __A = [f'{f}.py' for f in new_import_files] __A = len(__UpperCamelCase ) == 0 all_relative_imports.extend(__UpperCamelCase ) return all_relative_imports def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import xxx` __A = re.findall('''^\s*import\s+(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Only keep the top-level module __A = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all __A = list(set(__UpperCamelCase ) ) __A = [] for imp in imports: try: importlib.import_module(__UpperCamelCase ) except ImportError: 
missing_packages.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' f'{", ".join(__UpperCamelCase )}. Run `pip install {" ".join(__UpperCamelCase )}`' ) return get_relative_imports(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = module_path.replace(os.path.sep , '''.''' ) __A = importlib.import_module(__UpperCamelCase ) if class_name is None: return find_pipeline_class(__UpperCamelCase ) return getattr(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" from ..pipelines import DiffusionPipeline __A = dict(inspect.getmembers(__UpperCamelCase , inspect.isclass ) ) __A = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , __UpperCamelCase ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' f' {loaded_module}.' 
) __A = cls return pipeline_class def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ): """simple docstring""" __A = str(__UpperCamelCase ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): __A = module_file_or_url __A = '''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: __A = get_diffusers_versions() # cut ".dev0" __A = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: __A = latest_version if latest_version[1:] in available_versions else '''main''' logger.info(f'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: __A = f'v{revision}' elif revision == "main": __A = revision else: raise ValueError( f'`custom_revision`: {revision} does not exist. Please make sure to choose one of' f' {", ".join(available_versions + ["main"] )}.' ) # community pipeline on GitHub __A = COMMUNITY_PIPELINES_URL.format(revision=__UpperCamelCase , pipeline=__UpperCamelCase ) try: __A = cached_download( __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = '''git''' __A = pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' 
) raise else: try: # Load from URL or cache if already cached __A = hf_hub_download( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment __A = check_imports(__UpperCamelCase ) # Now we move the module inside our cached dynamic modules. __A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(__UpperCamelCase ) __A = Path(__UpperCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(__UpperCamelCase , submodule_path / module_file ) for module_needed in modules_needed: __A = f'{module_needed}.py' shutil.copy(os.path.join(__UpperCamelCase , __UpperCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(__UpperCamelCase , __UpperCamelCase ): __A = use_auth_token elif use_auth_token is True: __A = HfFolder.get_token() else: __A = None __A = model_info(__UpperCamelCase , revision=__UpperCamelCase , token=__UpperCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
__A = submodule_path / commit_hash __A = full_submodule + os.path.sep + commit_hash create_dynamic_module(__UpperCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(__UpperCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( __UpperCamelCase , f'{module_needed}.py' , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return os.path.join(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , **__UpperCamelCase , ): """simple docstring""" __A = get_cached_module_file( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return get_class_in_module(__UpperCamelCase , final_module.replace('''.py''' , '''''' ) )
266
1
"""simple docstring""" import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ): """simple docstring""" try: __A = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __A = default else: # KEY is set, convert it to True or False. try: __A = strtobool(__UpperCamelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'If set, {key} must be yes or no.' 
) return _value lowercase_ = parse_flag_from_env('RUN_SLOW', default=False) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return unittest.skip('''Test was skipped''' )(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return unittest.skipUnless( is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" 
# Accelerate-style testing utilities: unittest skip decorators, temp-dir test
# bases, and (async) subprocess helpers.
#
# NOTE(review): throughout this chunk assignments were mangled to `__A = ...`
# and parameters to `__UpperCamelCase`/`_lowerCamelCase`, while the bodies
# still read the original names (e.g. `test_case`, `mocks`, `line`, `result`,
# `output`, `cls.tmpdir`). As written, many paths raise NameError at runtime —
# the original bindings need restoring before use.

# Tail of a skip decorator whose `def` line lies above this chunk
# (skips unless more than one CUDA GPU is visible).
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )


def lowerCAmelCase ( __UpperCamelCase ):
    """Skip the decorated test unless more than one XPU is available."""
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )


def lowerCAmelCase ( __UpperCamelCase ):
    """Skip the decorated test unless `safetensors` is installed."""
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )


def lowerCAmelCase ( __UpperCamelCase ):
    """Skip the decorated test unless DeepSpeed is installed."""
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )


def lowerCAmelCase ( __UpperCamelCase ):
    """Skip the decorated test unless torch >= 1.12.0 is installed."""
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )


def lowerCAmelCase ( __UpperCamelCase=None , __UpperCamelCase=None ):
    """Skip the decorated test unless torch >= *version*.

    Usable bare or parametrized; called with no test case it returns a
    `partial` awaiting one.
    """
    # NOTE(review): `test_case` / `version` are unbound here — both
    # parameters were renamed to `__UpperCamelCase`; confirm intended names.
    if test_case is None:
        return partial(__UpperCamelCase , version=__UpperCamelCase )
    return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , f'test requires torch version >= {version}' )(__UpperCamelCase )


def lowerCAmelCase ( __UpperCamelCase ):
    """Skip the decorated test unless tensorboard is installed."""
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )


def lowerCAmelCase ( __UpperCamelCase ):
    """Skip the decorated test unless wandb is installed."""
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )


def lowerCAmelCase ( __UpperCamelCase ):
    """Skip the decorated test unless comet_ml is installed."""
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )


# True when at least one supported tracker (wandb / tensorboard) is available
# and comet_ml is NOT installed.
lowercase_ = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def lowerCAmelCase ( __UpperCamelCase ):
    """Skip unless some tracker is available and comet_ml is absent."""
    # NOTE(review): reads `_atleast_one_tracker_available`, but the flag
    # above was renamed to `lowercase_` — confirm/restore the name.
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )


class snake_case ( unittest.TestCase ):
    """TestCase backed by a class-level temporary directory."""

    # Whether setUp() empties the shared temp dir before each test.
    A_ : str = True

    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls : Tuple ):
        """Create the shared temporary directory once per class."""
        # NOTE(review): result dropped into `__A`; later code reads
        # `cls.tmpdir` — presumably this should assign `cls.tmpdir`.
        __A = tempfile.mkdtemp()

    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls : Dict ):
        """Remove the shared temporary directory after the class finishes."""
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Optionally wipe the temp dir's contents before each test."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('''**/*''' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(_lowerCamelCase )


class snake_case ( unittest.TestCase ):
    """TestCase that resets the accelerator singletons on teardown."""

    def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Tear down and reset global AcceleratorState/PartialState."""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class snake_case ( unittest.TestCase ):
    """TestCase helper that starts mocks and stops them on cleanup."""

    def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Union[mock.Mock, List[mock.Mock]] ):
        """Start one mock or a list of mocks; register stop() as cleanup."""
        # NOTE(review): `mocks` / `self.mocks` are stale names for the
        # mangled parameter/assignment — restore before running.
        __A = mocks if isinstance(_lowerCamelCase, (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )


def lowerCAmelCase ( __UpperCamelCase ):
    """Return True iff the given tensor is identical on every process."""
    # NOTE(review): `state`, `tensor`, `tensors` refer to the mangled `__A`
    # assignments above them — restore before running.
    __A = AcceleratorState()
    __A = tensor[None].clone().to(state.device )
    __A = gather(__UpperCamelCase ).cpu()
    __A = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , __UpperCamelCase ):
            return False
    return True


class snake_case :
    """Container for a finished subprocess: returncode, stdout, stderr."""

    def __init__( self : str, _lowerCamelCase : Union[str, Any], _lowerCamelCase : str, _lowerCamelCase : Union[str, Any] ):
        # NOTE(review): `returncode` / `stdout` / `stderr` are unbound —
        # all three parameters were renamed to `_lowerCamelCase`.
        __A = returncode
        __A = stdout
        __A = stderr


async def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
    """Feed each line of an async stream to `callback` until EOF."""
    while True:
        __A = await stream.readline()
        if line:
            callback(__UpperCamelCase )
        else:
            break


async def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=False ):
    """Run `cmd` asynchronously, teeing stdout/stderr live; return _RunOutput."""
    if echo:
        print('''\nRunning: ''' , ''' '''.join(__UpperCamelCase ) )
    __A = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    __A = []
    __A = []

    def tee(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="" ):
        # Decode a raw line, keep it in `sink`, and optionally echo it.
        __A = line.decode('''utf-8''' ).rstrip()
        sink.append(__UpperCamelCase )
        if not quiet:
            print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=__UpperCamelCase , )
    return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )


def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=1_8_0 , __UpperCamelCase=False , __UpperCamelCase=True ):
    """Synchronously drive the async runner above; raise on non-zero exit."""
    __A = asyncio.get_event_loop()
    __A = loop.run_until_complete(
        _stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
    __A = ''' '''.join(__UpperCamelCase )
    if result.returncode > 0:
        __A = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}' )
    return result


class snake_case ( _lowerCAmelCase ):
    """Exception raised when a checked subprocess call fails."""
    pass


def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ):
    """Run `command` via subprocess.check_output; optionally return decoded stdout."""
    try:
        __A = subprocess.check_output(__UpperCamelCase , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(__UpperCamelCase , '''decode''' ):
                __A = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}' ) from e
266
"""End-to-end smoke tests of PyTorchBenchmark on tiny checkpoints
(timing + memory, inference + training, CSV export, memory tracing)."""
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class snake_case ( unittest.TestCase ):
    """Benchmark smoke tests.

    NOTE(review): each method assigns the model id / args / benchmark /
    results into `__A` but then reads the original names (`MODEL_ID`,
    `results`, `benchmark`, `config`) — restore those bindings before
    running; as written the methods raise NameError.
    """

    def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ):
        """Assert every (batch_size, sequence_length) cell holds a result."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ):
                __A = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Inference benchmark on tiny-gpt2 without an explicit config."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Inference benchmark with only_pretrain_model enabled."""
        __A = '''sgugger/tiny-distilbert-classification'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _SCREAMING_SNAKE_CASE ( self : Any ):
        """Inference benchmark with TorchScript enabled."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' )
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Inference benchmark in fp16 (GPU only)."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        """Inference benchmark when the config declares no architectures."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase )
        # set architectures equal to `None`
        __A = None
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _SCREAMING_SNAKE_CASE ( self : int ):
        """Training benchmark on tiny-gpt2."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    @unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' )
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Training benchmark in fp16 (GPU only)."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def _SCREAMING_SNAKE_CASE ( self : str ):
        """Inference benchmark with an explicitly loaded config."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase )
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Inference benchmark for an encoder-decoder (tinier_bart)."""
        __A = '''sshleifer/tinier_bart'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase )
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Training benchmark with an explicitly loaded config."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase )
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        """Training benchmark for an encoder-decoder (tinier_bart)."""
        __A = '''sshleifer/tinier_bart'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase )
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """All CSV side-output files are written when save_to_csv is on."""
        __A = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            __A = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, save_to_csv=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ), inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ), env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ), multi_process=_lowerCamelCase, )
            __A = PyTorchBenchmark(_lowerCamelCase )
            benchmark.run()
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Line-by-line memory tracing produces summaries and a log file."""
        __A = '''sshleifer/tiny-gpt2'''

        def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ):
            # A summary must expose all four memory-trace views.
            self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) )
            self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) )
            self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) )
            self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            __A = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ), log_print=_lowerCamelCase, trace_memory_line_by_line=_lowerCamelCase, multi_process=_lowerCamelCase, )
            __A = PyTorchBenchmark(_lowerCamelCase )
            __A = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
266
1
"""Interactive Vigenère cipher over the uppercase Latin alphabet.

Fix: the obfuscated module bound every function to the same name
(`lowerCAmelCase`) and the alphabet to `lowercase_`, while the bodies call
`main` / `encrypt_message` / `decrypt_message` / `translate_message` and read
`LETTERS` — raising NameError on use. The intended names are restored here.
"""
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main() -> None:
    """Prompt for a message, key, and mode, then print the translation.

    NOTE: as in the original, an unrecognized mode leaves `translated`
    unbound and fails at the final print.
    """
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')

    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)

    print(f'\n{mode.title()}ed message:')
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Return *message* encrypted with *key* using the Vigenère cipher."""
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    """Return *message* decrypted with *key* using the Vigenère cipher."""
    return translate_message(key, message, 'decrypt')


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of *message* by the matching key letter.

    mode: 'encrypt' adds the key offsets, 'decrypt' subtracts them.
    Case is preserved; non-alphabetic characters are copied verbatim and
    do not advance the key position.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index])
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index])
            # Wrap around the 26-letter alphabet.
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return ''.join(translated)


if __name__ == '__main__':
    main()
266
"""Tokenization tests for Pegasus / BigBird-Pegasus (slow + fast tokenizers).

NOTE(review): assignments were mangled to `__A = ...` while bodies read the
original names (`tokenizer`, `vocab_keys`, `rust_tokenizer`, `py_tokenizer`,
`batch`, `targets`, plus the fixture constant renamed to `lowercase_` but
read as `_lowerCamelCase`) — restore those bindings before running.
"""
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# Path to the local SentencePiece fixture used by setUp().
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')


@require_sentencepiece
@require_tokenizers
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
    """Tests for google/pegasus-large tokenization."""

    A_ : Optional[Any] = PegasusTokenizer
    A_ : int = PegasusTokenizerFast
    A_ : Optional[Any] = True
    A_ : Union[str, Any] = True

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Build a tokenizer from the fixture model and save it to tmpdir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        __A = PegasusTokenizer(_lowerCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Lazily load the full pretrained pegasus-large tokenizer."""
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )

    def _SCREAMING_SNAKE_CASE ( self : int, **_lowerCamelCase : List[Any] ):
        """Reload the fixture tokenizer saved in setUp()."""
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ):
        """Return a (input, output) text pair for generic mixin tests."""
        return ("This is a test", "This is a test")

    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        """</s> maps to id 1 and back."""
        __A = '''</s>'''
        __A = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ), _lowerCamelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ), _lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : int ):
        """First/last vocab entries and vocab size match the fixture."""
        __A = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0], '''<pad>''' )
        self.assertEqual(vocab_keys[1], '''</s>''' )
        self.assertEqual(vocab_keys[-1], '''v''' )
        self.assertEqual(len(_lowerCamelCase ), 11_03 )

    def _SCREAMING_SNAKE_CASE ( self : int ):
        """vocab_size matches the fixture model."""
        self.assertEqual(self.get_tokenizer().vocab_size, 11_03 )

    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        """Slow and fast tokenizers agree on special/unknown tokens."""
        __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __A = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __A = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0]
        __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0]
        self.assertListEqual(_lowerCamelCase, _lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Mask tokens encode to their reserved ids."""
        __A = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        __A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        __A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0]
        self.assertListEqual(_lowerCamelCase, _lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Pretrained tokenizer settings (ids, offset, lengths) are stable."""
        __A = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_61_03
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 1_03
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 10_24
        __A = '''To ensure a smooth flow of bank resolutions.'''
        __A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0]
        self.assertListEqual(_lowerCamelCase, _lowerCamelCase )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        """Batched encoding pads/truncates to model_max_length (1024)."""
        __A = ['''This is going to be way too long.''' * 1_50, '''short example''']
        __A = ['''not super long but more than 5 tokens''', '''tiny''']
        __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' )
        __A = self._large_tokenizer(
            text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 10_24)
        assert batch.attention_mask.shape == (2, 10_24)
        assert targets["input_ids"].shape == (2, 5)
        assert len(_lowerCamelCase ) == 2  # input_ids, attention_mask.

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Integration check against a pinned bigbird-pegasus revision."""
        # fmt: off
        __A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCamelCase, model_name='''google/bigbird-pegasus-large-arxiv''', revision='''ba85d0851d708441f91440d509690f1ab6353415''', )


@require_sentencepiece
@require_tokenizers
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
    """Tests for BigBird-Pegasus tokenization (offset=0, [MASK] token)."""

    A_ : str = PegasusTokenizer
    A_ : Union[str, Any] = PegasusTokenizerFast
    A_ : Any = True
    A_ : str = True

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Build a BigBird-style tokenizer from the fixture and save it."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        __A = PegasusTokenizer(_lowerCamelCase, offset=0, mask_token_sent=_lowerCamelCase, mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _SCREAMING_SNAKE_CASE ( self : str ):
        """Lazily load the full pretrained bigbird-pegasus tokenizer."""
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int], **_lowerCamelCase : Dict ):
        """Reload the fixture tokenizer saved in setUp()."""
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ):
        """Return a (input, output) text pair for generic mixin tests."""
        return ("This is a test", "This is a test")

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Slow and fast tokenizers agree on special/unknown tokens."""
        __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __A = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __A = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0]
        __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0]
        self.assertListEqual(_lowerCamelCase, _lowerCamelCase )

    @require_torch
    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Batched encoding pads/truncates to model_max_length (4096)."""
        __A = ['''This is going to be way too long.''' * 10_00, '''short example''']
        __A = ['''not super long but more than 5 tokens''', '''tiny''']
        __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' )
        __A = self._large_tokenizer(
            text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 40_96)
        assert batch.attention_mask.shape == (2, 40_96)
        assert targets["input_ids"].shape == (2, 5)
        assert len(_lowerCamelCase ) == 2  # input_ids, attention_mask.

    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Token ids match the original TF implementation for a sample string."""
        __A = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        __A = self._large_tokenizer(_lowerCamelCase ).input_ids
        self.assertListEqual(
            _lowerCamelCase, [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1], )
266
1
"""Placeholder classes that raise a helpful error when `sentencepiece` is
missing; instantiation routes through `requires_backends`.

NOTE(review): every class below is named `snake_case`, so each definition
shadows the previous one and only the last binding survives at module level
(the original file gave each dummy the name of a real tokenizer class).
The metaclass base was also mangled to `_lowerCAmelCase` — presumably
`DummyObject` imported above; confirm/restore.
"""
from ..utils import DummyObject, requires_backends


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    # Backend(s) this dummy stands in for.
    A_ : Optional[int] = ["sentencepiece"]

    def __init__( self : int, *_lowerCamelCase : Optional[Any], **_lowerCamelCase : Tuple ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : List[str] = ["sentencepiece"]

    def __init__( self : Optional[int], *_lowerCamelCase : Optional[Any], **_lowerCamelCase : Union[str, Any] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Union[str, Any] = ["sentencepiece"]

    def __init__( self : List[Any], *_lowerCamelCase : Any, **_lowerCamelCase : int ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : List[Any] = ["sentencepiece"]

    def __init__( self : Optional[int], *_lowerCamelCase : Any, **_lowerCamelCase : Any ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Optional[int] = ["sentencepiece"]

    def __init__( self : Tuple, *_lowerCamelCase : List[Any], **_lowerCamelCase : Any ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Dict = ["sentencepiece"]

    def __init__( self : List[Any], *_lowerCamelCase : str, **_lowerCamelCase : Tuple ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : int = ["sentencepiece"]

    def __init__( self : int, *_lowerCamelCase : Optional[int], **_lowerCamelCase : int ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : int = ["sentencepiece"]

    def __init__( self : Union[str, Any], *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : int = ["sentencepiece"]

    def __init__( self : int, *_lowerCamelCase : Optional[Any], **_lowerCamelCase : Tuple ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : List[Any] = ["sentencepiece"]

    def __init__( self : Any, *_lowerCamelCase : Tuple, **_lowerCamelCase : List[Any] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Optional[Any] = ["sentencepiece"]

    def __init__( self : Union[str, Any], *_lowerCamelCase : str, **_lowerCamelCase : Dict ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Tuple = ["sentencepiece"]

    def __init__( self : Union[str, Any], *_lowerCamelCase : Optional[Any], **_lowerCamelCase : Optional[Any] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Optional[int] = ["sentencepiece"]

    def __init__( self : str, *_lowerCamelCase : Optional[int], **_lowerCamelCase : Dict ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Union[str, Any] = ["sentencepiece"]

    def __init__( self : List[Any], *_lowerCamelCase : List[str], **_lowerCamelCase : int ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : List[str] = ["sentencepiece"]

    def __init__( self : List[Any], *_lowerCamelCase : Tuple, **_lowerCamelCase : List[Any] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : str = ["sentencepiece"]

    def __init__( self : List[Any], *_lowerCamelCase : List[str], **_lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Any = ["sentencepiece"]

    def __init__( self : Union[str, Any], *_lowerCamelCase : Optional[int], **_lowerCamelCase : Any ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Any = ["sentencepiece"]

    def __init__( self : int, *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : List[str] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : List[str] = ["sentencepiece"]

    def __init__( self : List[str], *_lowerCamelCase : Dict, **_lowerCamelCase : Dict ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Any = ["sentencepiece"]

    def __init__( self : Optional[int], *_lowerCamelCase : Optional[int], **_lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Dict = ["sentencepiece"]

    def __init__( self : Union[str, Any], *_lowerCamelCase : Optional[Any], **_lowerCamelCase : Any ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Any = ["sentencepiece"]

    def __init__( self : int, *_lowerCamelCase : Optional[Any], **_lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Union[str, Any] = ["sentencepiece"]

    def __init__( self : Union[str, Any], *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : Dict ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : int = ["sentencepiece"]

    def __init__( self : List[Any], *_lowerCamelCase : Optional[Any], **_lowerCamelCase : int ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : str = ["sentencepiece"]

    def __init__( self : Tuple, *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : int ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : str = ["sentencepiece"]

    def __init__( self : List[Any], *_lowerCamelCase : Optional[int], **_lowerCamelCase : Union[str, Any] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : int = ["sentencepiece"]

    def __init__( self : Dict, *_lowerCamelCase : List[str], **_lowerCamelCase : List[Any] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : List[Any] = ["sentencepiece"]

    def __init__( self : Union[str, Any], *_lowerCamelCase : Optional[int], **_lowerCamelCase : List[str] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Optional[Any] = ["sentencepiece"]

    def __init__( self : Tuple, *_lowerCamelCase : Optional[Any], **_lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : Any = ["sentencepiece"]

    def __init__( self : Any, *_lowerCamelCase : Dict, **_lowerCamelCase : List[Any] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )


class snake_case ( metaclass=_lowerCAmelCase ):
    '''simple docstring'''
    A_ : List[str] = ["sentencepiece"]

    def __init__( self : Dict, *_lowerCamelCase : Any, **_lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        requires_backends(self, ['''sentencepiece'''] )
266
"""Convert between string casing conventions (simple/camel/pascal/snake/kebab).

Restored from a mangled original in which every function had been renamed to
the same identifier (``lowerCAmelCase``) — one of them with duplicate
parameter names, a SyntaxError — while the bodies still referenced the
intended names (``str_``, ``split_input``, ``to_simple_case``,
``to_complex_case``), raising ``NameError`` even where they parsed.  The
internal call graph fixes the intended names, which are reinstated here; the
last surviving public name is kept as an alias for backward compatibility.
"""
import re


def split_input(str_: str) -> list:
    """Split *str_* on any punctuation, returning a list of word lists.

    >>> split_input("one two 31235three4four")
    [['one', 'two', '31235three4four']]
    """
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Capitalize every word and join with no separator (PascalCase form).

    >>> to_simple_case("one two 31235three4four")
    'OneTwo31235Three4Four'
    """
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of *text* with *separator*, upper- or lower-cased.

    Returns the literal string ``"not valid string"`` on empty input,
    mirroring the original module's error convention.
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """PascalCase: every word capitalized, no separator."""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """camelCase: PascalCase with the first character lower-cased."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """snake_case (or SNAKE_CASE when *upper* is true)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    """kebab-case (or KEBAB-CASE when *upper* is true)."""
    return to_complex_case(text, upper, "-")


# Backward-compatible alias: in the mangled module only the last definition
# survived under this name, and it corresponded to to_kebab_case.
lowerCAmelCase = to_kebab_case


if __name__ == "__main__":
    __import__("doctest").testmod()
266
1
"""simple docstring"""
# NOTE(review): this module looks machine-mangled — all classes are named
# `snake_case`, every parameter is `_lowerCamelCase` (duplicate argument
# names make the `def` headers invalid Python), and assignments that
# presumably were `self.<attr> = ...` or distinct locals all target one
# throwaway `__A`, while later statements reference the presumably-original
# names (`config_and_inputs`, `input_ids`, ...).  Reproduced token-for-token;
# only formatting and comments are added.
from __future__ import annotations

import unittest

from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaVaForMaskedLM,
        TFDebertaVaForQuestionAnswering,
        TFDebertaVaForSequenceClassification,
        TFDebertaVaForTokenClassification,
        TFDebertaVaModel,
    )


# Helper that builds tiny DeBERTa-v2 configs and dummy inputs for the tests.
class snake_case:
    '''simple docstring'''

    def __init__( self : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : List[str]=13, _lowerCamelCase : Union[str, Any]=7, _lowerCamelCase : Tuple=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Tuple=99, _lowerCamelCase : Optional[Any]=32, _lowerCamelCase : Optional[int]=2, _lowerCamelCase : Optional[Any]=4, _lowerCamelCase : int=37, _lowerCamelCase : Dict="gelu", _lowerCamelCase : str=0.1, _lowerCamelCase : str=0.1, _lowerCamelCase : Dict=5_12, _lowerCamelCase : Optional[Any]=16, _lowerCamelCase : Optional[Any]=2, _lowerCamelCase : int=0.02, _lowerCamelCase : Optional[Any]=False, _lowerCamelCase : List[Any]=True, _lowerCamelCase : Optional[Any]="None", _lowerCamelCase : int=3, _lowerCamelCase : int=4, _lowerCamelCase : Union[str, Any]=None, ):
        '''simple docstring'''
        # NOTE(review): each `__A = ...` below was presumably `self.<name> = ...`.
        __A = parent
        __A = batch_size
        __A = seq_length
        __A = is_training
        __A = use_input_mask
        __A = use_token_type_ids
        __A = use_labels
        __A = vocab_size
        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = intermediate_size
        __A = hidden_act
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = max_position_embeddings
        __A = type_vocab_size
        __A = type_sequence_label_size
        __A = initializer_range
        __A = num_labels
        __A = num_choices
        __A = relative_attention
        __A = position_biased_input
        __A = pos_att_type
        __A = scope

    # Builds (config, input_ids, token_type_ids, input_mask, labels...) tuple.
    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        '''simple docstring'''
        __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        __A = None
        if self.use_input_mask:
            __A = random_attention_mask([self.batch_size, self.seq_length] )
        __A = None
        if self.use_token_type_ids:
            __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        __A = None
        __A = None
        __A = None
        if self.use_labels:
            __A = ids_tensor([self.batch_size], self.type_sequence_label_size )
            __A = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
        __A = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=_lowerCamelCase,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Base model: checks last_hidden_state shape for dict and list inputs.
    def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Any, _lowerCamelCase : Optional[int], _lowerCamelCase : str, _lowerCamelCase : Optional[int], _lowerCamelCase : Tuple, _lowerCamelCase : List[str] ):
        '''simple docstring'''
        __A = TFDebertaVaModel(config=_lowerCamelCase )
        __A = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        __A = [input_ids, input_mask]
        __A = model(_lowerCamelCase )
        __A = model(_lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    # Masked-LM head: per-position logits over the vocabulary.
    def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : int, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Tuple, _lowerCamelCase : Optional[Any], _lowerCamelCase : Optional[int], _lowerCamelCase : int, _lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        __A = TFDebertaVaForMaskedLM(config=_lowerCamelCase )
        __A = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        __A = model(_lowerCamelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    # Sequence-classification head: one logit vector per example.
    def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int], _lowerCamelCase : Union[str, Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : Tuple, _lowerCamelCase : Any, _lowerCamelCase : Optional[int], _lowerCamelCase : Any ):
        '''simple docstring'''
        __A = self.num_labels
        __A = TFDebertaVaForSequenceClassification(config=_lowerCamelCase )
        __A = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        __A = model(_lowerCamelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    # Token-classification head: one logit vector per token.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : str, _lowerCamelCase : List[Any], _lowerCamelCase : Any, _lowerCamelCase : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : Tuple, _lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        __A = self.num_labels
        __A = TFDebertaVaForTokenClassification(config=_lowerCamelCase )
        __A = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        __A = model(_lowerCamelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )

    # Question-answering head: start/end logits per position.
    def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : Optional[int], _lowerCamelCase : str, _lowerCamelCase : Optional[int], _lowerCamelCase : Dict, _lowerCamelCase : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : List[str] ):
        '''simple docstring'''
        __A = TFDebertaVaForQuestionAnswering(config=_lowerCamelCase )
        __A = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        __A = model(_lowerCamelCase )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )

    # Repackages prepare_config_and_inputs() output for the common mixin.
    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        '''simple docstring'''
        __A = self.prepare_config_and_inputs()
        (
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
        ) = config_and_inputs
        __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_tf
class snake_case ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    A_ : List[Any] = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    A_ : Optional[Any] = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    A_ : List[Any] = False
    A_ : List[str] = False

    def _SCREAMING_SNAKE_CASE ( self : Any ):
        '''simple docstring'''
        # NOTE(review): TFDebertaVaModelTester is not defined in this file —
        # presumably the tester class above before its name was mangled.
        __A = TFDebertaVaModelTester(self )
        __A = ConfigTester(self, config_class=_lowerCamelCase, hidden_size=37 )

    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : int ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        '''simple docstring'''
        __A = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        self.assertIsNotNone(_lowerCamelCase )


@require_tf
class snake_case ( unittest.TestCase ):
    '''simple docstring'''

    @unittest.skip(reason='''Model not available yet''' )
    def _SCREAMING_SNAKE_CASE ( self : int ):
        '''simple docstring'''
        pass

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        '''simple docstring'''
        # Integration check against fixed expected hidden-state values.
        __A = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        __A = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        __A = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase )[0]
        __A = tf.constant(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4], _lowerCamelCase, atol=1e-4 )
266
"""Singly linked list built from a Python list, printed in reverse via recursion.

Restored from a mangled original in which the node class and all functions had
been renamed to colliding placeholder names while their bodies still referenced
the intended identifiers (``Node``, ``make_linked_list``, ``print_reverse``,
``main``), ``__init__`` read an undefined ``data`` name, and the type check had
degenerated into ``isinstance(head_node, head_node)``.  The intended names and
assignments are reinstated here.
"""
from __future__ import annotations


class Node:
    """A single node of a singly linked list."""

    def __init__(self, data=None):
        self.data = data
        self.next = None  # next node in the chain, or None at the tail

    def __repr__(self):
        """Render the chain starting at this node as ``a->b->c``."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Build a linked list from *elements_list* and return its head node.

    Raises:
        Exception: if *elements_list* is empty (original error convention).
    """
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Print the list's data in reverse (tail first) via recursion.

    Silently does nothing for None or non-Node arguments, which also
    terminates the recursion at the tail's ``next``.
    """
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    """Run the doctests, then demo the list forwards and in reverse."""
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


# Backward-compatible alias for the mangled public name (its last binding
# in the original module was this entry point).
lowerCAmelCase = main


if __name__ == "__main__":
    main()
266
1
"""Distribute words over fully-justified lines of a fixed width.

Restored from a mangled original whose single public function had two
parameters with the same placeholder name (a SyntaxError, so the module could
not import) and whose body referenced identifiers (``word``, ``words``,
``line``, ``justify``, ``max_width``) that no longer existed.  The intended
names are reinstated; the mangled public name is kept as an alias.
"""


def text_justification(word: str, max_width: int) -> list:
    """Greedily pack the words of *word* into justified lines of *max_width*.

    Spaces between words are distributed as evenly as possible, with the
    extra spaces going to the leftmost gaps; the final line is
    left-justified and padded with trailing spaces.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        # Total spaces still to place on this line.
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # A single word: all leftover space goes after it.
            return line[0] + " " * overall_spaces_count
        spaces_to_insert_between_words = words_count - 1
        # num_spaces_between_words_list[i]: spaces to insert after line[i].
        num_spaces_between_words_list = spaces_to_insert_between_words * [
            overall_spaces_count // spaces_to_insert_between_words
        ]
        spaces_count_in_locations = (
            overall_spaces_count % spaces_to_insert_between_words
        )
        # Distribute the remainder round-robin to the leftmost gaps.
        for i in range(spaces_count_in_locations):
            num_spaces_between_words_list[i] += 1
        aligned_words_list = []
        for i in range(spaces_to_insert_between_words):
            aligned_words_list.append(line[i])
            aligned_words_list.append(num_spaces_between_words_list[i] * " ")
        # The last word carries no trailing gap.
        aligned_words_list.append(line[-1])
        return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        # width     = total length of the words already on the line
        # len(line) = minimum one space between each adjacent pair
        if width + len(inner_word) + len(line) <= max_width:
            line.append(inner_word)
            width += len(inner_word)
        else:
            # Current line is full: justify it and start a new one.
            answer.append(justify(line, width, max_width))
            line, width = [inner_word], len(inner_word)
    # Last line: left-justified with single spaces, padded to max_width.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


# Backward-compatible alias for the mangled public name.
lowerCAmelCase = text_justification


if __name__ == "__main__":
    from doctest import testmod

    testmod()
266
"""simple docstring"""
# NOTE(review): machine-mangled Speech2Text-style feature extractor — the
# class and its methods carry placeholder names, `def` headers repeat the
# parameter name `_lowerCamelCase` (invalid Python), the base class
# `_lowerCAmelCase` is undefined here (presumably SequenceFeatureExtractor),
# and `__A = ...` assignments presumably were `self.<attr> = ...` or distinct
# locals.  Reproduced token-for-token; only formatting and comments added.
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


lowercase_ = logging.get_logger(__name__)


# Kaldi-fbank speech feature extractor with utterance-level CMVN.
class snake_case ( _lowerCAmelCase ):
    '''simple docstring'''

    A_ : int = ["input_features", "attention_mask"]

    def __init__( self : Optional[Any], _lowerCamelCase : Union[str, Any]=80, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Any=80, _lowerCamelCase : List[str]=0.0, _lowerCamelCase : int=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]=True, **_lowerCamelCase : List[str], ):
        '''simple docstring'''
        super().__init__(feature_size=_lowerCamelCase, sampling_rate=_lowerCamelCase, padding_value=_lowerCamelCase, **_lowerCamelCase )
        # NOTE(review): each `__A = ...` was presumably `self.<name> = ...`.
        __A = num_mel_bins
        __A = do_ceptral_normalize
        __A = normalize_means
        __A = normalize_vars
        __A = True

    # Extracts Kaldi-compatible log-mel filterbank features from a waveform.
    def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : np.ndarray, ):
        '''simple docstring'''
        __A = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        __A = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
        __A = ta_kaldi.fbank(_lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate )
        return features.numpy()

    # Utterance-level cepstral mean/variance normalization for one array.
    @staticmethod
    def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray, _lowerCamelCase : int, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : float = 0.0, ):
        '''simple docstring'''
        # make sure we normalize float32 arrays
        if normalize_means:
            __A = x[:input_length].mean(axis=0 )
            __A = np.subtract(_lowerCamelCase, _lowerCamelCase )
        if normalize_vars:
            __A = x[:input_length].std(axis=0 )
            __A = np.divide(_lowerCamelCase, _lowerCamelCase )
        if input_length < x.shape[0]:
            # Positions beyond the true length are reset to padding_value.
            __A = padding_value
        # make sure array is in float32
        __A = x.astype(np.floataa )
        return x

    # Applies CMVN to every feature array, honoring the attention mask.
    def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[np.ndarray], _lowerCamelCase : Optional[np.ndarray] = None ):
        '''simple docstring'''
        __A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(_lowerCamelCase, _lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value )
            for x, n in zip(_lowerCamelCase, _lowerCamelCase )
        ]

    # Featurizes raw speech: fbank extraction, padding, optional CMVN,
    # optional tensor conversion.
    def __call__( self : Optional[Any], _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _lowerCamelCase : Union[bool, str, PaddingStrategy] = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : bool = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[bool] = None, **_lowerCamelCase : Optional[Any], ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        __A = isinstance(_lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        __A = is_batched_numpy or (
            isinstance(_lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(_lowerCamelCase, np.ndarray ):
            __A = np.asarray(_lowerCamelCase, dtype=np.floataa )
        elif isinstance(_lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            __A = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            __A = [raw_speech]
        # extract fbank features
        __A = [self._extract_fbank_features(_lowerCamelCase ) for waveform in raw_speech]
        # convert into correct format for padding
        __A = BatchFeature({'''input_features''': features} )
        __A = self.pad(
            _lowerCamelCase,
            padding=_lowerCamelCase,
            max_length=_lowerCamelCase,
            truncation=_lowerCamelCase,
            pad_to_multiple_of=_lowerCamelCase,
            return_attention_mask=_lowerCamelCase,
            **_lowerCamelCase,
        )
        # make sure list is in array format
        __A = padded_inputs.get('''input_features''' )
        if isinstance(input_features[0], _lowerCamelCase ):
            __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for feature in input_features]
        __A = padded_inputs.get('''attention_mask''' )
        if attention_mask is not None:
            __A = [np.asarray(_lowerCamelCase, dtype=np.intaa ) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            __A = (
                np.array(_lowerCamelCase, dtype=np.intaa )
                if self._get_padding_strategies(_lowerCamelCase, max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            __A = self.normalize(
                padded_inputs['''input_features'''], attention_mask=_lowerCamelCase )
        if return_tensors is not None:
            __A = padded_inputs.convert_to_tensors(_lowerCamelCase )
        return padded_inputs
266
1
"""simple docstring"""
# NOTE(review): machine-mangled PyTorch benchmark test suite — every method is
# `_SCREAMING_SNAKE_CASE`, results/arguments are assigned to the throwaway
# `__A` while later statements reference the presumably-original names
# (`results`, `benchmark`, `MODEL_ID`, `config`, ...).  Reproduced
# token-for-token; only formatting and comments are added.
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class snake_case ( unittest.TestCase ):
    '''simple docstring'''

    # Asserts every (batch_size, seq_len) cell of a results dict is populated.
    def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ):
                __A = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(_lowerCamelCase )

    # Inference benchmark on a tiny GPT-2 checkpoint.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        '''simple docstring'''
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Same, but benchmarking only the pretrained base model.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        '''simple docstring'''
        __A = '''sgugger/tiny-distilbert-classification'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # TorchScript inference benchmark.
    def _SCREAMING_SNAKE_CASE ( self : Any ):
        '''simple docstring'''
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # FP16 inference benchmark (skipped on CPU).
    @unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' )
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        '''simple docstring'''
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Benchmark from an explicit config whose architectures list is cleared.
    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        '''simple docstring'''
        __A = '''sshleifer/tiny-gpt2'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase )
        # set architectures equal to `None`
        __A = None
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Training benchmark.
    def _SCREAMING_SNAKE_CASE ( self : int ):
        '''simple docstring'''
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # FP16 training benchmark (skipped on CPU).
    @unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' )
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        '''simple docstring'''
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # Inference with an explicitly supplied config.
    def _SCREAMING_SNAKE_CASE ( self : str ):
        '''simple docstring'''
        __A = '''sshleifer/tiny-gpt2'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase )
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Encoder-decoder (tiny BART) inference with config.
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        '''simple docstring'''
        __A = '''sshleifer/tinier_bart'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase )
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Training with an explicitly supplied config.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        '''simple docstring'''
        __A = '''sshleifer/tiny-gpt2'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase )
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # Encoder-decoder (tiny BART) training with config.
    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        '''simple docstring'''
        __A = '''sshleifer/tinier_bart'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase )
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # CSV export: all result files should be written into the temp dir.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        '''simple docstring'''
        __A = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            __A = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=_lowerCamelCase,
                inference=_lowerCamelCase,
                save_to_csv=_lowerCamelCase,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ),
                train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ),
                inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ),
                train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ),
                env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ),
                multi_process=_lowerCamelCase,
            )
            __A = PyTorchBenchmark(_lowerCamelCase )
            benchmark.run()
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() )

    # Line-by-line memory tracing: log file written, summaries non-empty.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        '''simple docstring'''
        __A = '''sshleifer/tiny-gpt2'''

        def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ):
            self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) )
            self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) )
            self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) )
            self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            __A = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=_lowerCamelCase,
                inference=_lowerCamelCase,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ),
                log_print=_lowerCamelCase,
                trace_memory_line_by_line=_lowerCamelCase,
                multi_process=_lowerCamelCase,
            )
            __A = PyTorchBenchmark(_lowerCamelCase )
            __A = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
266
"""simple docstring"""
# NOTE(review): machine-mangled image-processor test — both classes are named
# `snake_case`, `def` headers repeat the parameter name `_lowerCamelCase`
# (invalid Python), `__A = ...` presumably was `self.<attr> = ...`, and
# `EfficientFormerImageProcessorTester` is referenced but never defined here
# (presumably the first class before its name was mangled).  Reproduced
# token-for-token; only formatting and comments are added.
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


# Holds the sizes/flags used to build image-processor kwargs for the tests.
class snake_case ( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=13, _lowerCamelCase : Any=3, _lowerCamelCase : Optional[int]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Any=[0.5, 0.5, 0.5], _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], ):
        '''simple docstring'''
        # NOTE(review): each `__A = ...` was presumably `self.<name> = ...`.
        __A = size if size is not None else {'''height''': 18, '''width''': 18}
        __A = parent
        __A = batch_size
        __A = num_channels
        __A = image_size
        __A = min_resolution
        __A = max_resolution
        __A = do_resize
        __A = size
        __A = do_normalize
        __A = image_mean
        __A = image_std

    # Kwargs dict handed to the image-processor constructor in the tests.
    def _SCREAMING_SNAKE_CASE ( self : int ):
        '''simple docstring'''
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
    '''simple docstring'''

    A_ : str = ViTImageProcessor if is_vision_available() else None

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        '''simple docstring'''
        __A = EfficientFormerImageProcessorTester(self )

    @property
    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        '''simple docstring'''
        return self.image_proc_tester.prepare_image_processor_dict()

    # The processor exposes all expected configuration attributes.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        '''simple docstring'''
        __A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCamelCase, '''image_mean''' ) )
        self.assertTrue(hasattr(_lowerCamelCase, '''image_std''' ) )
        self.assertTrue(hasattr(_lowerCamelCase, '''do_normalize''' ) )
        self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) )
        self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        '''simple docstring'''
        pass

    # PIL input: single image and batch produce correctly shaped tensors.
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        '''simple docstring'''
        # Initialize image_processor
        __A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase, Image.Image )
        # Test not batched input
        __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ),
        )
        # Test batched
        __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ),
        )

    # NumPy input: same shape checks as the PIL case.
    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        '''simple docstring'''
        # Initialize image_processor
        __A = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase, np.ndarray )
        # Test not batched input
        __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ),
        )
        # Test batched
        __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ),
        )

    # Torch tensor input: same shape checks as the PIL case.
    def _SCREAMING_SNAKE_CASE ( self : str ):
        '''simple docstring'''
        # Initialize image_processor
        __A = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase, torch.Tensor )
        # Test not batched input
        __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ),
        )
        # Test batched
        __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ),
        )
266
1
"""Min-max normalization and z-score standardization helpers."""
from statistics import mean, stdev


def lowerCAmelCase(data, ndigits=3):
    """Rescale *data* to the [0, 1] range via min-max normalization.

    Args:
        data: Non-empty sequence of numbers with at least two distinct values
            (a constant sequence would divide by zero).
        ndigits: Decimal places to round each result to.

    Returns:
        List of floats in [0, 1].
    """
    # FIX: the original declared the same parameter name twice
    # (``__UpperCamelCase, __UpperCamelCase=3``), which is a SyntaxError.
    x_min = min(data)
    x_max = max(data)
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


# NOTE(review): this second definition re-uses the same name and therefore
# shadows the normalizer above — pre-existing in the file, kept for
# interface compatibility (only this standardizer is reachable by callers).
def lowerCAmelCase(data, ndigits=3):  # noqa: F811
    """Standardize *data* to zero mean and unit (sample) variance.

    Args:
        data: Sequence of at least two numbers (``stdev`` needs n >= 2).
        ndigits: Decimal places to round each result to.

    Returns:
        List of z-scores as floats.
    """
    mu = mean(data)
    sigma = stdev(data)
    return [round((x - mu) / sigma, ndigits) for x in data]
266
"""Deprecated feature-extractor alias for the Segformer image processor."""
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor

lowercase_ = logging.get_logger(__name__)


class snake_case(_lowerCAmelCase):
    """Deprecated alias of ``SegformerImageProcessor``.

    Exists only for backward compatibility; emits a ``FutureWarning`` and
    forwards every argument to the image-processor base class.
    """

    def __init__(self, *args, **kwargs):
        # FIX: the original signature declared ``*_lowerCamelCase`` and
        # ``**_lowerCamelCase`` with the same name (a SyntaxError), and passed
        # that args tuple as the warning *category*; the category must be a
        # Warning subclass such as FutureWarning.
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
266
1
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets lowercase_ = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' lowercase_ = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. 
See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n' lowercase_ = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... 
"what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... 
case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install "sacrebleu>=1.4.12"`.''' ) return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='''http://www.cs.umd.edu/~snover/tercom/''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''', id='''sequence''' ), id='''references''' ), } ), codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''], reference_urls=[ '''https://github.com/jhclark/tercom''', ], ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : bool = False, _lowerCamelCase : bool = False, _lowerCamelCase : bool = False, _lowerCamelCase : bool = False, ): '''simple docstring''' __A = len(references[0] ) if any(len(_lowerCamelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) __A = [[refs[i] for refs in references] for i in range(_lowerCamelCase )] __A = TER( normalized=_lowerCamelCase, no_punct=_lowerCamelCase, asian_support=_lowerCamelCase, case_sensitive=_lowerCamelCase, ) __A = sb_ter.corpus_score(_lowerCamelCase, _lowerCamelCase ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
266
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : int=3, _lowerCamelCase : Optional[Any]=18, _lowerCamelCase : Any=30, _lowerCamelCase : str=4_00, _lowerCamelCase : int=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=True, ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = apply_ocr def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = LayoutLMvaImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''apply_ocr''' ) ) def 
_SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} ) __A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) self.assertIsInstance(encoding.words, _lowerCamelCase ) self.assertIsInstance(encoding.boxes, _lowerCamelCase ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0], 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' # with apply_OCR = True __A = LayoutLMvaImageProcessor() from datasets import load_dataset __A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' ) __A = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) ) 
self.assertEqual(len(encoding.words ), len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', 
'''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 
2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, _lowerCamelCase ) self.assertListEqual(encoding.boxes, _lowerCamelCase ) # with apply_OCR = False __A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
266
1
"""simple docstring""" import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A , __A = emb.weight.shape __A = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase ) __A = emb.weight.data return lin_layer def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=None ): """simple docstring""" __A = {} for old_key in state_dict.keys(): __A = old_key if "moe_layer.experts." in key: if expert_idx is not None: __A = key.replace('''moe_layer.experts.0''' , f'ffn.experts.expert_{expert_idx}' ) else: __A = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: __A = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: __A = key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: __A = key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." 
in key: __A = key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: __A = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: __A = key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) __A = state_dict[old_key] return new_dict def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = WEIGHTS_NAME ): """simple docstring""" __A = [] __A = 0 os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) for expert in range(__UpperCamelCase ): __A = switch_checkpoint_path + f'-rank-{expert}.pt' if os.path.isfile(__UpperCamelCase ): __A = torch.load(__UpperCamelCase )['''model'''] remove_ignore_keys_(__UpperCamelCase ) __A = rename_fairseq_keys(__UpperCamelCase , __UpperCamelCase ) __A = os.path.join( __UpperCamelCase , weights_name.replace('''.bin''' , f'-{len(__UpperCamelCase )+1:05d}-of-???.bin' ) ) torch.save(__UpperCamelCase , __UpperCamelCase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__UpperCamelCase )[0]].dtype ) # Add the last block __A = os.path.join(__UpperCamelCase , weights_name.replace('''.bin''' , f'-{len(__UpperCamelCase )+1:05d}-of-???.bin' ) ) __A = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(__UpperCamelCase ) __A = rename_fairseq_keys(__UpperCamelCase , __UpperCamelCase ) __A = shared_weights['''decoder.embed_tokens.weight'''] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__UpperCamelCase ) == 1: __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) torch.save(__UpperCamelCase , __UpperCamelCase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__UpperCamelCase , __UpperCamelCase ) # Otherwise, let's build the index 
__A = {} for idx, shard in enumerate(__UpperCamelCase ): __A = weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-{len(__UpperCamelCase ):05d}.bin' ) __A = os.path.join(__UpperCamelCase , weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(__UpperCamelCase , os.path.join(__UpperCamelCase , __UpperCamelCase ) ) for key in shard: __A = shard_file # Add the metadata __A = {'''total_size''': total_size} __A = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , '''w''' , encoding='''utf-8''' ) as f: __A = json.dumps(__UpperCamelCase , indent=2 , sort_keys=__UpperCamelCase ) + '''\n''' f.write(__UpperCamelCase ) return metadata, index if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) lowercase_ = parser.parse_args() lowercase_ , lowercase_ = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) lowercase_ = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) lowercase_ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
266
"""Cross-platform helpers to hide and restore the terminal cursor."""
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class snake_case(ctypes.Structure):
        """ctypes mirror of the Win32 ``CONSOLE_CURSOR_INFO`` structure."""

        # FIX: ctypes.Structure reads its layout from ``_fields_``; the
        # original assigned the field list to an unrelated attribute ``A_``,
        # leaving the struct without a memory layout.
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    """Hide the console cursor (Win32 console API or ANSI escape code)."""
    if os.name == "nt":
        # FIX: the original referenced ``ctypes.windll.kernelaa`` (no such
        # DLL) and instantiated ``CursorInfo`` although the class is named
        # ``snake_case`` — both NameError/AttributeError on Windows.
        cursor_info = snake_case()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write('''\033[?25l''')
        sys.stdout.flush()


def show_cursor():
    """Make the console cursor visible again (inverse of ``hide_cursor``)."""
    if os.name == "nt":
        cursor_info = snake_case()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write('''\033[?25h''')
        sys.stdout.flush()


@contextmanager
def lowerCAmelCase():
    """Context manager hiding the cursor for the body and always restoring it.

    FIX: the original body called ``hide_cursor()``/``show_cursor()``, but all
    three functions in the file had been renamed to ``lowerCAmelCase`` (each
    shadowing the last), so entering the context raised ``NameError``. The
    helper names are restored; the public context-manager name is unchanged.
    """
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
266
1
"""Largest integer obtainable by deleting exactly one digit of the input."""


def lowerCAmelCase(__UpperCamelCase):
    """Return the maximum value reachable by removing one digit of the input.

    The sign is ignored (``abs``), matching the original's use of
    ``str(abs(...))``.

    Args:
        __UpperCamelCase: The integer whose digits are considered.

    Returns:
        The largest int formed by the remaining digits; ``0`` for a
        single-digit input (removing its only digit leaves nothing).

    Raises:
        TypeError: If the input is not an ``int``.
    """
    if not isinstance(__UpperCamelCase, int):
        raise TypeError('''only integers accepted as input''')
    # FIX: the original collapsed every local into ``__A`` and referenced an
    # undefined ``num_transpositions`` / popped by a list object, so it could
    # never run. One candidate per position: the number with that digit cut.
    digits = str(abs(__UpperCamelCase))
    candidates = [digits[:index] + digits[index + 1 :] for index in range(len(digits))]
    # A single-digit number yields the empty string; treat that as 0 instead
    # of letting ``int('')`` raise ValueError.
    return max(int(candidate) if candidate else 0 for candidate in candidates)


if __name__ == "__main__":
    __import__('doctest').testmod()
266
"""simple docstring""" import argparse import struct import unittest class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : bytes ): '''simple docstring''' __A = data # Initialize hash values __A = [ 0X6a_09e_667, 0Xbb_67a_e85, 0X3c_6ef_372, 0Xa5_4ff_53a, 0X51_0e5_27f, 0X9b_056_88c, 0X1f_83d_9ab, 0X5b_e0c_d19, ] # Initialize round constants __A = [ 0X42_8a2_f98, 0X71_374_491, 0Xb5_c0f_bcf, 0Xe9_b5d_ba5, 0X39_56c_25b, 0X59_f11_1f1, 0X92_3f8_2a4, 0Xab_1c5_ed5, 0Xd8_07a_a98, 0X12_835_b01, 0X24_318_5be, 0X55_0c7_dc3, 0X72_be5_d74, 0X80_deb_1fe, 0X9b_dc0_6a7, 0Xc1_9bf_174, 0Xe4_9b6_9c1, 0Xef_be4_786, 0X0f_c19_dc6, 0X24_0ca_1cc, 0X2d_e92_c6f, 0X4a_748_4aa, 0X5c_b0a_9dc, 0X76_f98_8da, 0X98_3e5_152, 0Xa8_31c_66d, 0Xb0_032_7c8, 0Xbf_597_fc7, 0Xc6_e00_bf3, 0Xd5_a79_147, 0X06_ca6_351, 0X14_292_967, 0X27_b70_a85, 0X2e_1b2_138, 0X4d_2c6_dfc, 0X53_380_d13, 0X65_0a7_354, 0X76_6a0_abb, 0X81_c2c_92e, 0X92_722_c85, 0Xa2_bfe_8a1, 0Xa8_1a6_64b, 0Xc2_4b8_b70, 0Xc7_6c5_1a3, 0Xd1_92e_819, 0Xd6_990_624, 0Xf4_0e3_585, 0X10_6aa_070, 0X19_a4c_116, 0X1e_376_c08, 0X27_487_74c, 0X34_b0b_cb5, 0X39_1c0_cb3, 0X4e_d8a_a4a, 0X5b_9cc_a4f, 0X68_2e6_ff3, 0X74_8f8_2ee, 0X78_a56_36f, 0X84_c87_814, 0X8c_c70_208, 0X90_bef_ffa, 0Xa4_506_ceb, 0Xbe_f9a_3f7, 0Xc6_717_8f2, ] __A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : bytes ): '''simple docstring''' __A = b'''\x80''' + (b'''\x00''' * (63 - (len(_lowerCamelCase ) + 8) % 64)) __A = struct.pack('''>Q''', (len(_lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' # Convert into blocks of 64 bytes __A = [ self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data ), 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __A = list(struct.unpack('''>16L''', _lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __A , 
__A , __A , __A , __A , __A , __A , __A = self.hashes for index in range(0, 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __A = ( self.ror(words[index - 15], 7 ) ^ self.ror(words[index - 15], 18 ) ^ (words[index - 15] >> 3) ) __A = ( self.ror(words[index - 2], 17 ) ^ self.ror(words[index - 2], 19 ) ^ (words[index - 2] >> 10) ) __A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression __A = self.ror(_lowerCamelCase, 6 ) ^ self.ror(_lowerCamelCase, 11 ) ^ self.ror(_lowerCamelCase, 25 ) __A = (e & f) ^ ((~e & 0Xff_fff_fff) & g) __A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 __A = self.ror(_lowerCamelCase, 2 ) ^ self.ror(_lowerCamelCase, 13 ) ^ self.ror(_lowerCamelCase, 22 ) __A = (a & b) ^ (a & c) ^ (b & c) __A = (sa + maj) % 0X100_000_000 __A , __A , __A , __A , __A , __A , __A , __A = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) __A = [a, b, c, d, e, f, g, h] # Modify final values __A = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes ) ] __A = ''''''.join([hex(_lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' return 0Xff_fff_fff & (value << (32 - rotations)) | (value >> rotations) class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' import hashlib __A = bytes('''Test String''', '''utf-8''' ) self.assertEqual(SHAaaa(_lowerCamelCase ).hash, hashlib.shaaaa(_lowerCamelCase ).hexdigest() ) def lowerCAmelCase ( ): """simple docstring""" import doctest doctest.testmod() __A = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! 
Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) __A = parser.parse_args() __A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: __A = f.read() else: __A = bytes(__UpperCamelCase , '''utf-8''' ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
266
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = len(__UpperCamelCase ) __A = [[0] * n for i in range(__UpperCamelCase )] for i in range(__UpperCamelCase ): __A = y_points[i] for i in range(2 , __UpperCamelCase ): for j in range(__UpperCamelCase , __UpperCamelCase ): __A = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
266
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets lowercase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' lowercase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' lowercase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. 
Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ], ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str="auto", _lowerCamelCase : Union[str, Any]=-1, _lowerCamelCase : List[str]=0.9, _lowerCamelCase : int=5, _lowerCamelCase : Tuple=5_00, _lowerCamelCase : Union[str, Any]="gpt2-large", _lowerCamelCase : int=-1, _lowerCamelCase : Union[str, Any]=10_24, _lowerCamelCase : Union[str, Any]=25, _lowerCamelCase : 
str=5, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=25, ): '''simple docstring''' __A = compute_mauve( p_text=_lowerCamelCase, q_text=_lowerCamelCase, p_features=_lowerCamelCase, q_features=_lowerCamelCase, p_tokens=_lowerCamelCase, q_tokens=_lowerCamelCase, num_buckets=_lowerCamelCase, pca_max_data=_lowerCamelCase, kmeans_explained_var=_lowerCamelCase, kmeans_num_redo=_lowerCamelCase, kmeans_max_iter=_lowerCamelCase, featurize_model_name=_lowerCamelCase, device_id=_lowerCamelCase, max_text_length=_lowerCamelCase, divergence_curve_discretization_size=_lowerCamelCase, mauve_scaling_factor=_lowerCamelCase, verbose=_lowerCamelCase, seed=_lowerCamelCase, ) return out
266
1
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = RoFormerTokenizer A_ : Any = RoFormerTokenizerFast A_ : Dict = True A_ : Optional[int] = True def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' super().setUp() def _SCREAMING_SNAKE_CASE ( self : Optional[Any], **_lowerCamelCase : Dict ): '''simple docstring''' return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''', **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], **_lowerCamelCase : Any ): '''simple docstring''' return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''', **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = '''永和服装饰品有限公司,今天天气非常好''' __A = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好''' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.get_tokenizer() __A , __A = self.get_chinese_input_output_texts() __A = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase, output_text.split() ) __A = tokens + [tokenizer.unk_token] __A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = self.get_rust_tokenizer() __A , __A = self.get_chinese_input_output_texts() __A = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase, output_text.split() ) __A = tokens + [tokenizer.unk_token] __A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] 
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' pass
266
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowercase_ = imread(R'digital_image_processing/image_data/lena_small.jpg') lowercase_ = cvtColor(img, COLOR_BGR2GRAY) def lowerCAmelCase ( ): """simple docstring""" __A = cn.convert_to_negative(__UpperCamelCase ) # assert negative_img array for at least one True assert negative_img.any() def lowerCAmelCase ( ): """simple docstring""" with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(__UpperCamelCase , 1_1_0 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def lowerCAmelCase ( ): """simple docstring""" __A = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCAmelCase ( ): """simple docstring""" __A = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() __A = canny.canny(__UpperCamelCase ) # assert canny array for at least one True assert canny_array.any() def lowerCAmelCase ( ): """simple docstring""" assert gg.gaussian_filter(__UpperCamelCase , 5 , sigma=0.9 ).all() def lowerCAmelCase ( ): """simple docstring""" 
__A = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __A = conv.img_convolve(__UpperCamelCase , __UpperCamelCase ).astype(__UpperCamelCase ) assert res.any() def lowerCAmelCase ( ): """simple docstring""" assert med.median_filter(__UpperCamelCase , 3 ).any() def lowerCAmelCase ( ): """simple docstring""" __A , __A = sob.sobel_filter(__UpperCamelCase ) assert grad.any() and theta.any() def lowerCAmelCase ( ): """simple docstring""" __A = sp.make_sepia(__UpperCamelCase , 2_0 ) assert sepia.all() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ): """simple docstring""" __A = bs.Burkes(imread(__UpperCamelCase , 1 ) , 1_2_0 ) burkes.process() assert burkes.output_img.any() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ): """simple docstring""" __A = rs.NearestNeighbour(imread(__UpperCamelCase , 1 ) , 4_0_0 , 2_0_0 ) nn.process() assert nn.output.any() def lowerCAmelCase ( ): """simple docstring""" __A = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. __A = imread(__UpperCamelCase , 0 ) # Test for get_neighbors_pixel function() return not None __A = 0 __A = 0 __A = image[x_coordinate][y_coordinate] __A = lbp.get_neighbors_pixel( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __A = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __A = lbp.local_binary_value(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert lbp_image.any()
266
1
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : Optional[int]=2, _lowerCamelCase : Optional[int]=3, _lowerCamelCase : int=64, _lowerCamelCase : List[str]=None ): '''simple docstring''' __A = np.random.default_rng(_lowerCamelCase ) __A = length __A = rng.normal(size=(length,) ).astype(np.floataa ) __A = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa ) def __len__( self : str ): '''simple docstring''' return self.length def __getitem__( self : Dict, _lowerCamelCase : Optional[int] ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple=0, _lowerCamelCase : Any=0, _lowerCamelCase : Optional[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[Any]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a[0] + self.b[0] class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : str, _lowerCamelCase : Optional[Any]=0, _lowerCamelCase : Any=0, _lowerCamelCase : List[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}' ) __A = False return x * self.a + self.b def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1_6 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __A = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __A = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} __A = load_dataset('''csv''' , data_files=__UpperCamelCase ) __A = datasets['''train'''].unique('''label''' ) __A = {v: i for i, v in enumerate(__UpperCamelCase )} def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) if "label" in examples: __A = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __A = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __A = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=2 ) __A = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
266
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowercase_ = random.Random() if is_torch_available(): import torch def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None ): """simple docstring""" if rng is None: __A = global_rng __A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any, _lowerCamelCase : List[str], _lowerCamelCase : Any=7, _lowerCamelCase : Optional[int]=4_00, _lowerCamelCase : Optional[int]=20_00, _lowerCamelCase : Dict=1, _lowerCamelCase : Optional[Any]=0.0, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Dict=True, ): '''simple docstring''' __A = parent __A = batch_size __A = min_seq_length __A = max_seq_length __A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __A = feature_size __A = padding_value __A = sampling_rate __A = return_attention_mask __A = do_normalize def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[Any]=False, _lowerCamelCase : int=False ): '''simple docstring''' def _flatten(_lowerCamelCase : List[str] ): return list(itertools.chain(*_lowerCamelCase ) ) if equal_length: __A = floats_list((self.batch_size, self.max_seq_length) ) else: # make 
sure that inputs increase in size __A = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: __A = [np.asarray(_lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = ASTFeatureExtractor def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ASTFeatureExtractionTester(self ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' # Tests that all call wrap to encode_plus and batch_encode_plus __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __A = [floats_list((1, x) )[0] for x in range(8_00, 14_00, 2_00 )] __A = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __A = feat_extract(speech_inputs[0], return_tensors='''np''' ).input_values __A = feat_extract(np_speech_inputs[0], return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test batched __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __A = np.asarray(_lowerCamelCase ) __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' import torch __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __A = np.random.rand(1_00 ).astype(np.floataa ) __A = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' from datasets import load_dataset __A = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech __A = ds.sort('''id''' ).select(range(_lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # fmt: off __A = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on __A = self._load_datasamples(1 ) __A = ASTFeatureExtractor() __A = feature_extractor(_lowerCamelCase, return_tensors='''pt''' 
).input_values self.assertEquals(input_values.shape, (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30], _lowerCamelCase, atol=1e-4 ) )
266
1
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=_lowerCAmelCase ) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} ) A_ : ClassVar[Features] = Features({"audio": Audio()} ) A_ : ClassVar[Features] = Features({"transcription": Value("string" )} ) A_ : str = "audio" A_ : str = "transcription" def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Union[str, Any] ): '''simple docstring''' if self.audio_column not in features: raise ValueError(f'Column {self.audio_column} is not present in features.' ) if not isinstance(features[self.audio_column], _lowerCamelCase ): raise ValueError(f'Column {self.audio_column} is not an Audio type.' ) __A = copy.deepcopy(self ) __A = self.input_schema.copy() __A = features[self.audio_column] __A = input_schema return task_template @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' return {self.audio_column: "audio", self.transcription_column: "transcription"}
266
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = current_set.copy() for row_index, row in enumerate(__UpperCamelCase ): __A = row[0] for column_index, column in enumerate(__UpperCamelCase ): if magnitude == 0: __A = column continue __A = column / magnitude # Subtract to cancel term __A = current_set[0] __A = [first_row] __A = current_set[1::] for row in current_set: __A = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__UpperCamelCase ) continue for column_index in range(len(__UpperCamelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__UpperCamelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: __A = final_set[0] __A = [] __A = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) __A = simplify(__UpperCamelCase ) for i in range(len(__UpperCamelCase ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , __UpperCamelCase ) __A = resultant return final_set def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if len(__UpperCamelCase ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) __A = len(__UpperCamelCase ) + 1 if any(len(__UpperCamelCase ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(__UpperCamelCase , (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(__UpperCamelCase ) == 1: return [equations[0][-1] / equations[0][0]] __A = equations.copy() if any(0 in row for row in data_set ): __A = data_set.copy() __A = [] for row_index, row in enumerate(__UpperCamelCase ): if 0 not in row: __A = data_set.pop(__UpperCamelCase ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' 
) data_set.insert(0 , __UpperCamelCase ) __A = data_set.copy() __A = simplify(__UpperCamelCase ) __A = simplified[::-1] __A = [] for row in simplified: __A = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue __A = row.copy()[: len(__UpperCamelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__UpperCamelCase ) == 0: solutions.append(0 ) continue __A = temp_row[1::] __A = temp_row[::-1] for column_index, column in enumerate(__UpperCamelCase ): current_solution -= column * solutions[column_index] solutions.append(__UpperCamelCase ) __A = [] for item in solutions: final.append(float(round(__UpperCamelCase , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowercase_ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
266
1
"""
Extract selected layers of a full RobertaForMaskedLM or GPT2LMHeadModel
checkpoint to initialize a smaller student model for transfer-learned
distillation.

NOTE(review): the dump had every assignment target collapsed to
``lowercase_`` (so ``compressed_sd`` was never populated) and imported the
nonexistent ``GPTaLMHeadModel``; targets and the import are reconstructed
from the teacher/student state-dict naming scheme visible in the lookups.
"""
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    # Keep teacher layers 0, 2, 4, 7, 9, 11 and renumber them consecutively
    # (std_idx) in the student.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
266
"""Evaluate a postfix (reverse Polish notation) expression."""
from __future__ import annotations

from typing import Any


def lowerCAmelCase(postfix_notation: list) -> int:
    """
    Evaluate a postfix expression given as a list of string tokens.

    Operands are parsed with ``int``; ``/`` truncates toward zero
    (C-style integer division), emulated on top of Python's floor division.
    An empty expression evaluates to 0.

    >>> lowerCAmelCase(["2", "1", "+", "3", "*"])
    9
    >>> lowerCAmelCase(["4", "13", "5", "/", "+"])
    6
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            # The value on top of the stack is the right-hand operand.
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Truncate toward zero: floor division rounds toward -inf,
                # so bump inexact negative quotients up by one.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
266
1
"""
Simulation of the Nagel--Schreckenberg cellular-automaton traffic model.

A highway is a list of cells: ``-1`` marks an empty cell and any other value
is the current speed of the car occupying that cell.

NOTE(review): in the dump all four functions were named ``lowerCAmelCase``
(mutually shadowing) while the bodies call ``get_distance``/``update``;
names and garbled assignment targets are reconstructed from those call sites.
"""
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """
    Build a one-row highway and place cars every ``frequency`` cells.

    >>> construct_highway(10, 2, 6)
    [[6, -1, 6, -1, 6, -1, 6, -1, 6, -1]]
    """
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """
    Count empty cells between the car at ``car_index`` and the next car,
    wrapping around to the start of the highway when needed.

    >>> get_distance([6, -1, 6, -1, 6], 2)
    1
    """
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """
    Compute each car's next speed (accelerate, cap by the gap, randomly
    slow down) without moving the cars yet.

    >>> update([-1, -1, -1, -1, -1, 2, -1, -1, -1, -1, 3], 0.0, 5)
    [-1, -1, -1, -1, -1, 3, -1, -1, -1, -1, 4]
    """
    number_of_cells = len(highway_now)
    # Before calculations, the next highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    """
    Run ``number_of_update`` steps of the model, appending each new highway
    state (speeds moved to their new positions) to ``highway``.
    """
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
266
"""Tests for the Flax RoFormer models.

NOTE(review): in the dump all three classes were named ``snake_case`` (the
first two were shadowed), every method shared one name, and
``FlaxRoFormerModelTester`` was referenced but never defined; class and
method names are reconstructed from the references visible in the code.
"""
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds small random configs and inputs for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids/masks plus a small, non-decoder RoFormer config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
266
1
"""Segment tree with lazy propagation: range assignment and range-max query.

The tree is 1-indexed: node 1 covers the whole array, and queries/updates
take 1-based, inclusive element positions.
"""
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        """Index of the left child of node ``idx``."""
        return idx * 2

    def right(self, idx: int) -> int:
        """Index of the right child of node ``idx``."""
        return idx * 2 + 1

    def build(
        self, idx: int, left_element: int, right_element: int, a: list[int]
    ) -> None:
        """Build the tree for ``a`` over the segment [left_element, right_element]."""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(
        self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int
    ) -> bool:
        """Assign ``val`` to every element in [a, b], propagating lazily."""
        # Push any pending lazy assignment down before touching this node.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:  # no overlap
            return True
        if left_element >= a and right_element <= b:  # total overlap
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True

        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(
        self, idx: int, left_element: int, right_element: int, a: int, b: int
    ) -> int | float:
        """Return the max of elements in [a, b]; ``-inf`` for an empty overlap."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
266
"""
Project Euler problem 174: https://projecteuler.net/problem=174

Count the tile quantities t <= t_limit that can form between 1 and n_limit
different hollow square laminae (a square outline with a centred square hole
of the same parity).
"""
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """
    Return how many t in [1, t_limit] admit between 1 and ``n_limit``
    distinct laminae with exactly t tiles.
    """
    # count[t] = number of (outer, hole) pairs producing exactly t tiles
    count = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole that keeps the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # The hole must have the same parity as the outer square.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
266
1
"""
Project Euler problem 551: https://projecteuler.net/problem=551

The sequence starts a(1) = 1 and a(i+1) = a(i) + digitsum(a(i)).
``solution(n)`` returns a(n) by writing the number as b * 10^k + c and
memoising "jumps": how much the low-order part grows over a run of terms
while the digit sum of the high part stays constant.

NOTE(review): the dump collapsed the module globals and all function names;
``ks``/``base``/``memo`` and the function names are reconstructed from the
references inside the bodies.
"""

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    """
    Advance ``a_i`` (little-endian digit list, value = b * 10^k + c) from
    term ``i`` toward term ``n`` using memoised jumps at level ``k``.
    Returns (diff, terms_jumped) where ``diff`` is the total added to c.
    """
    # ds_b: digit sum of the high part b; c: value of the low k digits.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                # Write the jumped-to value back into the low k digits.
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """
    Sequentially apply the recurrence to the low ``k`` digits until term ``n``
    is reached or a carry escapes the low part. Returns (diff, terms_done).
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        # Ripple the addend through the low k digits.
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            # Carry left the low part: stop so the caller can re-bucket.
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add ``addend`` into the little-endian ``digits`` list starting at index ``k``."""
    for j in range(k, len(digits)):
        # Only the low decimal digit of addend lands here; the rest carries on.
        s = digits[j] + addend % 10
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Return a(n) of the digit-sum sequence (a(1) = 1).

    >>> solution(10)
    62
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
266
"""
Regression fixtures and mocked MRPC dataloaders used by the Accelerate test
suite.

NOTE(review): in the dump both ``torch.nn.Module`` classes were named
``snake_case`` (the second shadowed the first) and the dataloader helper's
parameter was renamed while its body still referenced ``accelerator``; names
are restored from those references.
"""
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    """Synthetic dataset with y = a * x + b + N(0, 0.1) noise."""

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    """Linear model whose parameters are 2-element tensors; uses index 0 of each."""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        # Log dtypes once, on the first batch only.
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    """Linear model y = a * x + b with scalar learnable parameters."""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        # Log dtypes once, on the first batch only.
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    """Build train/eval dataloaders over the bundled MRPC CSV samples."""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader


# Backward-compatible alias for the garbled name in the dump.
lowerCAmelCase = mocked_dataloaders
266
1
"""
Solve a system of n simultaneous linear equations, each given as a row of
n + 1 numbers (coefficients followed by the constant term).
"""


def simplify(current_set: list) -> list:
    """
    Normalise each row by its leading coefficient and subtract the first row
    from the others, recursing on the remaining sub-system.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> creates "unit" rows
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list) -> list:
    """
    Solve the system; results are rounded to 5 decimal places.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        # Move an equation with no zero coefficients to the front so the
        # elimination never divides by zero.
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last (shortest) simplified row upward.
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
266
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. 
According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... 
\'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ), } ), ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[List[List[str]]], _lowerCamelCase : List[List[str]], _lowerCamelCase : int = 1, _lowerCamelCase : int = 4, ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCamelCase, hypotheses=_lowerCamelCase, min_len=_lowerCamelCase, max_len=_lowerCamelCase ) }
266
1
"""simple docstring""" import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy lowercase_ = logging.get_logger(__name__) lowercase_ = { 'artists_file': 'artists.json', 'lyrics_file': 'lyrics.json', 'genres_file': 'genres.json', } lowercase_ = { 'artists_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json', }, 'genres_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json', }, 'lyrics_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json', }, } lowercase_ = { 'jukebox': 512, } class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : List[str] = VOCAB_FILES_NAMES A_ : Any = PRETRAINED_VOCAB_FILES_MAP A_ : Union[str, Any] = PRETRAINED_LYRIC_TOKENS_SIZES A_ : int = ["input_ids", "attention_mask"] def __init__( self : Optional[int], _lowerCamelCase : Optional[int], _lowerCamelCase : str, _lowerCamelCase : List[str], _lowerCamelCase : int=["v3", "v2", "v2"], _lowerCamelCase : Optional[int]=5_12, _lowerCamelCase : Tuple=5, _lowerCamelCase : Tuple="<|endoftext|>", **_lowerCamelCase : Union[str, Any], ): '''simple docstring''' __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token super().__init__( unk_token=_lowerCamelCase, n_genres=_lowerCamelCase, version=_lowerCamelCase, max_n_lyric_tokens=_lowerCamelCase, **_lowerCamelCase, ) __A = version __A = max_n_lyric_tokens __A = n_genres with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) with open(_lowerCamelCase, 
encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) __A = R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. if len(self.lyrics_encoder ) == 79: __A = oov.replace(R'''\-\'''', R'''\-+\'''' ) __A = regex.compile(_lowerCamelCase ) __A = {v: k for k, v in self.artists_encoder.items()} __A = {v: k for k, v in self.genres_encoder.items()} __A = {v: k for k, v in self.lyrics_encoder.items()} @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return dict(self.artists_encoder, self.genres_encoder, self.lyrics_encoder ) def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int], _lowerCamelCase : Optional[int], _lowerCamelCase : Optional[int] ): '''simple docstring''' __A = [self.artists_encoder.get(_lowerCamelCase, 0 ) for artist in list_artists] for genres in range(len(_lowerCamelCase ) ): __A = [self.genres_encoder.get(_lowerCamelCase, 0 ) for genre in list_genres[genres]] __A = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) __A = [[self.lyrics_encoder.get(_lowerCamelCase, 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Any ): '''simple docstring''' return list(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[str], _lowerCamelCase : Any, _lowerCamelCase : Optional[int], **_lowerCamelCase : Union[str, Any] ): '''simple docstring''' __A , __A , __A = self.prepare_for_tokenization(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) __A = self._tokenize(_lowerCamelCase ) return artist, genre, lyrics def 
_SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : str, _lowerCamelCase : str, _lowerCamelCase : str, _lowerCamelCase : bool = False ): '''simple docstring''' for idx in range(len(self.version ) ): if self.version[idx] == "v3": __A = artists[idx].lower() __A = [genres[idx].lower()] else: __A = self._normalize(artists[idx] ) + '''.v2''' __A = [ self._normalize(_lowerCamelCase ) + '''.v2''' for genre in genres[idx].split('''_''' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": __A = regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' ) __A = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n''' __A = {vocab[index]: index + 1 for index in range(len(_lowerCamelCase ) )} __A = 0 __A = len(_lowerCamelCase ) + 1 __A = self.vocab __A = {v: k for k, v in self.vocab.items()} __A = '''''' else: __A = regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' ) __A = self._run_strip_accents(_lowerCamelCase ) __A = lyrics.replace('''\\''', '''\n''' ) __A = self.out_of_vocab.sub('''''', _lowerCamelCase ), [], [] return artists, genres, lyrics def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : Optional[Any] ): '''simple docstring''' __A = unicodedata.normalize('''NFD''', _lowerCamelCase ) __A = [] for char in text: __A = unicodedata.category(_lowerCamelCase ) if cat == "Mn": continue output.append(_lowerCamelCase ) return "".join(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : str ): '''simple docstring''' __A = ( [chr(_lowerCamelCase ) for i in range(ord('''a''' ), ord('''z''' ) + 1 )] + [chr(_lowerCamelCase ) for i in range(ord('''A''' ), ord('''Z''' ) + 1 )] + [chr(_lowerCamelCase ) for i in range(ord('''0''' ), ord('''9''' ) + 1 )] + ['''.'''] ) __A = frozenset(_lowerCamelCase ) __A = re.compile(R'''_+''' ) __A = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] ) __A = pattern.sub('''_''', _lowerCamelCase ).strip('''_''' ) 
return text def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[str] ): '''simple docstring''' return " ".join(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[int], _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' # Convert to TensorType if not isinstance(_lowerCamelCase, _lowerCamelCase ): __A = TensorType(_lowerCamelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' ) import tensorflow as tf __A = tf.constant __A = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' ) import torch __A = torch.tensor __A = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' ) import jax.numpy as jnp # noqa: F811 __A = jnp.array __A = _is_jax else: __A = np.asarray __A = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: __A = [inputs] if not is_tensor(_lowerCamelCase ): __A = as_tensor(_lowerCamelCase ) except: # noqa E722 raise ValueError( '''Unable to create tensor, you should probably activate truncation and/or padding ''' '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' ) return inputs def __call__( self : int, _lowerCamelCase : List[Any], _lowerCamelCase : Optional[Any], _lowerCamelCase : str="", _lowerCamelCase : Optional[Any]="pt" ): '''simple docstring''' __A = [0, 0, 0] __A = [artist] * len(self.version ) __A = [genres] * len(self.version ) __A , __A , __A = self.tokenize(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) __A , __A , __A = 
self._convert_token_to_id(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) __A = [-INFINITY] * len(full_tokens[-1] ) __A = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=_lowerCamelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.artists_encoder, ensure_ascii=_lowerCamelCase ) ) __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.genres_encoder, ensure_ascii=_lowerCamelCase ) ) __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.lyrics_encoder, ensure_ascii=_lowerCamelCase ) ) return (artists_file, genres_file, lyrics_file) def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : List[Any], _lowerCamelCase : Optional[int], _lowerCamelCase : Tuple ): '''simple docstring''' __A = self.artists_decoder.get(_lowerCamelCase ) __A = [self.genres_decoder.get(_lowerCamelCase ) for genre in genres_index] __A = [self.lyrics_decoder.get(_lowerCamelCase ) for character in lyric_index] return artist, genres, lyrics
266
"""simple docstring""" class snake_case : '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : list[int] ): '''simple docstring''' __A = len(_lowerCamelCase ) __A = [0] * len_array if len_array > 0: __A = array[0] for i in range(1, _lowerCamelCase ): __A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int ): '''simple docstring''' __A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(_lowerCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
266
1
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class snake_case ( yaml.SafeLoader ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : Union[str, Any] ): '''simple docstring''' __A = [self.constructed_objects[key_node] for key_node, _ in node.value] __A = [tuple(_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else key for key in keys] __A = Counter(_lowerCamelCase ) __A = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Optional[int], _lowerCamelCase : Union[str, Any]=False ): '''simple docstring''' __A = super().construct_mapping(_lowerCamelCase, deep=_lowerCamelCase ) self._check_no_duplicates_on_constructed_node(_lowerCamelCase ) return mapping def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: __A = full_content[1:].index('''---''' ) + 1 __A = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(__UpperCamelCase ) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = {"train_eval_index"} # train-eval-index in the YAML metadata @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any], _lowerCamelCase : Path ): '''simple docstring''' with open(_lowerCamelCase, encoding='''utf-8''' ) as readme_file: __A , __A = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(_lowerCamelCase ) else: return cls() def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Path ): '''simple docstring''' if path.exists(): with open(_lowerCamelCase, encoding='''utf-8''' ) as readme_file: __A = readme_file.read() else: __A = None __A = 
self._to_readme(_lowerCamelCase ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as readme_file: readme_file.write(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if readme_content is not None: __A , __A = _split_yaml_from_readme(_lowerCamelCase ) __A = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: __A = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any], _lowerCamelCase : str ): '''simple docstring''' __A = yaml.load(_lowerCamelCase, Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields __A = { (key.replace('''-''', '''_''' ) if key.replace('''-''', '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' return yaml.safe_dump( { (key.replace('''_''', '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() }, sort_keys=_lowerCamelCase, allow_unicode=_lowerCamelCase, encoding='''utf-8''', ).decode('''utf-8''' ) lowercase_ = { 'image-classification': [], 'translation': [], 'image-segmentation': [], 'fill-mask': [], 'automatic-speech-recognition': [], 'token-classification': [], 'sentence-similarity': [], 'audio-classification': [], 'question-answering': [], 'summarization': [], 'zero-shot-classification': [], 'table-to-text': [], 'feature-extraction': [], 'other': [], 'multiple-choice': [], 'text-classification': [], 'text-to-image': [], 'text2text-generation': [], 'zero-shot-image-classification': [], 'tabular-classification': [], 'tabular-regression': [], 'image-to-image': [], 'tabular-to-text': [], 'unconditional-image-generation': [], 'text-retrieval': [], 'text-to-speech': [], 'object-detection': [], 'audio-to-audio': [], 'text-generation': [], 'conversational': [], 
'table-question-answering': [], 'visual-question-answering': [], 'image-to-text': [], 'reinforcement-learning': [], 'voice-activity-detection': [], 'time-series-forecasting': [], 'document-question-answering': [], } if __name__ == "__main__": from argparse import ArgumentParser lowercase_ = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.') ap.add_argument('readme_filepath') lowercase_ = ap.parse_args() lowercase_ = Path(args.readme_filepath) lowercase_ = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
266
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowercase_ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowercase_ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase ( ): """simple docstring""" __A = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __A = bs[:] __A = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCamelCase ) cs.append(2**8 + n ) n += 1 __A = [chr(__UpperCamelCase ) for n in cs] return dict(zip(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = set() __A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __A = char return pairs class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = VOCAB_FILES_NAMES A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : Dict="replace", _lowerCamelCase : 
Any="<s>", _lowerCamelCase : Optional[int]="</s>", _lowerCamelCase : Dict="</s>", _lowerCamelCase : List[Any]="<s>", _lowerCamelCase : List[str]="<unk>", _lowerCamelCase : str="<pad>", _lowerCamelCase : Any="<mask>", _lowerCamelCase : Any=False, **_lowerCamelCase : Tuple, ): '''simple docstring''' __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else bos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else eos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else sep_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else cls_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token super().__init__( errors=_lowerCamelCase, bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, add_prefix_space=_lowerCamelCase, **_lowerCamelCase, ) with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) __A = {v: k for k, v in self.encoder.items()} __A = errors # how to handle errors in decoding __A = bytes_to_unicode() __A = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCamelCase, encoding='''utf-8''' ) as merges_handle: __A = merges_handle.read().split('''\n''' )[1:-1] __A = [tuple(merge.split() ) for merge in bpe_merges] __A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) ) __A = {} __A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder, **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] __A = tuple(_lowerCamelCase ) __A = get_pairs(_lowerCamelCase ) if not pairs: return token while True: __A = min(_lowerCamelCase, key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __A , 
__A = bigram __A = [] __A = 0 while i < len(_lowerCamelCase ): try: __A = word.index(_lowerCamelCase, _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __A = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __A = tuple(_lowerCamelCase ) __A = new_word if len(_lowerCamelCase ) == 1: break else: __A = get_pairs(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = word return word def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Dict ): '''simple docstring''' __A = [] for token in re.findall(self.pat, _lowerCamelCase ): __A = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) ) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' return self.encoder.get(_lowerCamelCase, self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ): '''simple docstring''' return self.decoder.get(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = ''''''.join(_lowerCamelCase ) __A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors ) return text def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join( _lowerCamelCase, 
(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowerCamelCase, ensure_ascii=_lowerCamelCase ) + '''\n''' ) __A = 0 with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ''' Please check that the tokenizer is not corrupted!''' ) __A = token_index writer.write(''' '''.join(_lowerCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str]=False, **_lowerCamelCase : List[Any] ): '''simple docstring''' __A = kwargs.pop('''add_prefix_space''', self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()): __A = ''' ''' + text 
return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ): '''simple docstring''' __A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __A = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
266
1
"""simple docstring""" from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : "DiagonalGaussianDistribution" class snake_case ( _lowerCAmelCase , _lowerCAmelCase ): '''simple docstring''' A_ : List[Any] = True @register_to_config def __init__( self : Optional[int], _lowerCamelCase : int = 3, _lowerCamelCase : int = 3, _lowerCamelCase : Tuple[str] = ("DownEncoderBlock2D",), _lowerCamelCase : Tuple[str] = ("UpDecoderBlock2D",), _lowerCamelCase : Tuple[int] = (64,), _lowerCamelCase : int = 1, _lowerCamelCase : str = "silu", _lowerCamelCase : int = 4, _lowerCamelCase : int = 32, _lowerCamelCase : int = 32, _lowerCamelCase : float = 0.1_82_15, ): '''simple docstring''' super().__init__() # pass init params to Encoder __A = Encoder( in_channels=_lowerCamelCase, out_channels=_lowerCamelCase, down_block_types=_lowerCamelCase, block_out_channels=_lowerCamelCase, layers_per_block=_lowerCamelCase, act_fn=_lowerCamelCase, norm_num_groups=_lowerCamelCase, double_z=_lowerCamelCase, ) # pass init params to Decoder __A = Decoder( in_channels=_lowerCamelCase, out_channels=_lowerCamelCase, up_block_types=_lowerCamelCase, block_out_channels=_lowerCamelCase, layers_per_block=_lowerCamelCase, norm_num_groups=_lowerCamelCase, act_fn=_lowerCamelCase, ) __A = nn.Convad(2 * latent_channels, 2 * latent_channels, 1 ) __A = nn.Convad(_lowerCamelCase, _lowerCamelCase, 1 ) __A = False __A = False # only relevant if vae tiling is enabled __A = self.config.sample_size __A = ( self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple) ) else 
self.config.sample_size ) __A = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __A = 0.25 def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Union[str, Any], _lowerCamelCase : int=False ): '''simple docstring''' if isinstance(_lowerCamelCase, (Encoder, Decoder) ): __A = value def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : bool = True ): '''simple docstring''' __A = use_tiling def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' self.enable_tiling(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = True def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = {} def fn_recursive_add_processors(_lowerCamelCase : str, _lowerCamelCase : torch.nn.Module, _lowerCamelCase : Dict[str, AttentionProcessor] ): if hasattr(_lowerCamelCase, '''set_processor''' ): __A = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', _lowerCamelCase, _lowerCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) return processors def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): '''simple docstring''' __A = len(self.attn_processors.keys() ) if isinstance(_lowerCamelCase, _lowerCamelCase ) and len(_lowerCamelCase ) != count: raise ValueError( f'A dict of processors was passed, but the number of processors {len(_lowerCamelCase )} does not match the' f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' 
) def fn_recursive_attn_processor(_lowerCamelCase : str, _lowerCamelCase : torch.nn.Module, _lowerCamelCase : Any ): if hasattr(_lowerCamelCase, '''set_processor''' ): if not isinstance(_lowerCamelCase, _lowerCamelCase ): module.set_processor(_lowerCamelCase ) else: module.set_processor(processor.pop(f'{name}.processor' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', _lowerCamelCase, _lowerCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : bool = True ): '''simple docstring''' if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(_lowerCamelCase, return_dict=_lowerCamelCase ) if self.use_slicing and x.shape[0] > 1: __A = [self.encoder(_lowerCamelCase ) for x_slice in x.split(1 )] __A = torch.cat(_lowerCamelCase ) else: __A = self.encoder(_lowerCamelCase ) __A = self.quant_conv(_lowerCamelCase ) __A = DiagonalGaussianDistribution(_lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : bool = True ): '''simple docstring''' if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(_lowerCamelCase, return_dict=_lowerCamelCase ) __A = self.post_quant_conv(_lowerCamelCase ) __A = self.decoder(_lowerCamelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=_lowerCamelCase ) @apply_forward_hook def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : torch.FloatTensor, 
_lowerCamelCase : bool = True ): '''simple docstring''' if self.use_slicing and z.shape[0] > 1: __A = [self._decode(_lowerCamelCase ).sample for z_slice in z.split(1 )] __A = torch.cat(_lowerCamelCase ) else: __A = self._decode(_lowerCamelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Optional[Any], _lowerCamelCase : List[Any], _lowerCamelCase : str ): '''simple docstring''' __A = min(a.shape[2], b.shape[2], _lowerCamelCase ) for y in range(_lowerCamelCase ): __A = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : Any, _lowerCamelCase : Union[str, Any] ): '''simple docstring''' __A = min(a.shape[3], b.shape[3], _lowerCamelCase ) for x in range(_lowerCamelCase ): __A = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : bool = True ): '''simple docstring''' __A = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __A = int(self.tile_latent_min_size * self.tile_overlap_factor ) __A = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
__A = [] for i in range(0, x.shape[2], _lowerCamelCase ): __A = [] for j in range(0, x.shape[3], _lowerCamelCase ): __A = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __A = self.encoder(_lowerCamelCase ) __A = self.quant_conv(_lowerCamelCase ) row.append(_lowerCamelCase ) rows.append(_lowerCamelCase ) __A = [] for i, row in enumerate(_lowerCamelCase ): __A = [] for j, tile in enumerate(_lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __A = self.blend_v(rows[i - 1][j], _lowerCamelCase, _lowerCamelCase ) if j > 0: __A = self.blend_h(row[j - 1], _lowerCamelCase, _lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_lowerCamelCase, dim=3 ) ) __A = torch.cat(_lowerCamelCase, dim=2 ) __A = DiagonalGaussianDistribution(_lowerCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : bool = True ): '''simple docstring''' __A = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __A = int(self.tile_sample_min_size * self.tile_overlap_factor ) __A = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
__A = [] for i in range(0, z.shape[2], _lowerCamelCase ): __A = [] for j in range(0, z.shape[3], _lowerCamelCase ): __A = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __A = self.post_quant_conv(_lowerCamelCase ) __A = self.decoder(_lowerCamelCase ) row.append(_lowerCamelCase ) rows.append(_lowerCamelCase ) __A = [] for i, row in enumerate(_lowerCamelCase ): __A = [] for j, tile in enumerate(_lowerCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __A = self.blend_v(rows[i - 1][j], _lowerCamelCase, _lowerCamelCase ) if j > 0: __A = self.blend_h(row[j - 1], _lowerCamelCase, _lowerCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_lowerCamelCase, dim=3 ) ) __A = torch.cat(_lowerCamelCase, dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : torch.FloatTensor, _lowerCamelCase : bool = False, _lowerCamelCase : bool = True, _lowerCamelCase : Optional[torch.Generator] = None, ): '''simple docstring''' __A = sample __A = self.encode(_lowerCamelCase ).latent_dist if sample_posterior: __A = posterior.sample(generator=_lowerCamelCase ) else: __A = posterior.mode() __A = self.decode(_lowerCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_lowerCamelCase )
266
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging lowercase_ = ( 'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py' ) lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCAmelCase ( ): """simple docstring""" __A = '''https://pypi.org/pypi/diffusers/json''' __A = json.loads(request.urlopen(__UpperCamelCase ).read() )['''releases'''].keys() return sorted(__UpperCamelCase , key=lambda __UpperCamelCase : version.Version(__UpperCamelCase ) ) def lowerCAmelCase ( ): """simple docstring""" if HF_MODULES_CACHE in sys.path: return sys.path.append(__UpperCamelCase ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = Path(__UpperCamelCase ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" init_hf_modules() __A = Path(__UpperCamelCase ) / name # If the parent module does not exist yet, recursively create it. 
if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import .xxx` __A = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = False __A = [module_file] __A = [] # Let's recurse through all relative imports while not no_change: __A = [] for f in files_to_check: new_imports.extend(get_relative_imports(__UpperCamelCase ) ) __A = Path(__UpperCamelCase ).parent __A = [str(module_path / m ) for m in new_imports] __A = [f for f in new_import_files if f not in all_relative_imports] __A = [f'{f}.py' for f in new_import_files] __A = len(__UpperCamelCase ) == 0 all_relative_imports.extend(__UpperCamelCase ) return all_relative_imports def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import xxx` __A = re.findall('''^\s*import\s+(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Only keep the top-level module __A = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all __A = list(set(__UpperCamelCase ) ) __A = [] for imp in imports: try: importlib.import_module(__UpperCamelCase ) except ImportError: 
missing_packages.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' f'{", ".join(__UpperCamelCase )}. Run `pip install {" ".join(__UpperCamelCase )}`' ) return get_relative_imports(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = module_path.replace(os.path.sep , '''.''' ) __A = importlib.import_module(__UpperCamelCase ) if class_name is None: return find_pipeline_class(__UpperCamelCase ) return getattr(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" from ..pipelines import DiffusionPipeline __A = dict(inspect.getmembers(__UpperCamelCase , inspect.isclass ) ) __A = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , __UpperCamelCase ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' f' {loaded_module}.' 
) __A = cls return pipeline_class def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ): """simple docstring""" __A = str(__UpperCamelCase ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): __A = module_file_or_url __A = '''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: __A = get_diffusers_versions() # cut ".dev0" __A = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: __A = latest_version if latest_version[1:] in available_versions else '''main''' logger.info(f'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: __A = f'v{revision}' elif revision == "main": __A = revision else: raise ValueError( f'`custom_revision`: {revision} does not exist. Please make sure to choose one of' f' {", ".join(available_versions + ["main"] )}.' ) # community pipeline on GitHub __A = COMMUNITY_PIPELINES_URL.format(revision=__UpperCamelCase , pipeline=__UpperCamelCase ) try: __A = cached_download( __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = '''git''' __A = pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' 
) raise else: try: # Load from URL or cache if already cached __A = hf_hub_download( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment __A = check_imports(__UpperCamelCase ) # Now we move the module inside our cached dynamic modules. __A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(__UpperCamelCase ) __A = Path(__UpperCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(__UpperCamelCase , submodule_path / module_file ) for module_needed in modules_needed: __A = f'{module_needed}.py' shutil.copy(os.path.join(__UpperCamelCase , __UpperCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(__UpperCamelCase , __UpperCamelCase ): __A = use_auth_token elif use_auth_token is True: __A = HfFolder.get_token() else: __A = None __A = model_info(__UpperCamelCase , revision=__UpperCamelCase , token=__UpperCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
__A = submodule_path / commit_hash __A = full_submodule + os.path.sep + commit_hash create_dynamic_module(__UpperCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(__UpperCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( __UpperCamelCase , f'{module_needed}.py' , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return os.path.join(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , **__UpperCamelCase , ): """simple docstring""" __A = get_cached_module_file( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return get_class_in_module(__UpperCamelCase , final_module.replace('''.py''' , '''''' ) )
266
1
"""simple docstring""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''), ('''encoder.deit.pos_embed''', 
'''encoder.embeddings.position_embeddings'''), ('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''), ('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''), ('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''), ('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''), ] ) return rename_keys def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) __A = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' ) __A = in_proj_weight[ : encoder_config.hidden_size, : ] __A = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] __A = in_proj_weight[ -encoder_config.hidden_size :, : ] def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = dct.pop(__UpperCamelCase ) __A = val def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if "handwritten" in checkpoint_url: __A = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: __A = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg''' __A = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert('''RGB''' ) return im @torch.no_grad() def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = ViTConfig(image_size=3_8_4 , qkv_bias=__UpperCamelCase ) __A = TrOCRConfig() # size 
of the architecture if "base" in checkpoint_url: __A = 7_6_8 elif "large" in checkpoint_url: # use ViT-large encoder __A = 1_0_2_4 __A = 4_0_9_6 __A = 2_4 __A = 1_6 __A = 1_0_2_4 else: raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: __A = False __A = '''relu''' __A = 1_0_2_4 __A = True __A = False __A = False # load HuggingFace model __A = ViTModel(__UpperCamelCase , add_pooling_layer=__UpperCamelCase ) __A = TrOCRForCausalLM(__UpperCamelCase ) __A = VisionEncoderDecoderModel(encoder=__UpperCamelCase , decoder=__UpperCamelCase ) model.eval() # load state_dict of original model, rename some keys __A = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='''cpu''' , check_hash=__UpperCamelCase )['''model'''] __A = create_rename_keys(__UpperCamelCase , __UpperCamelCase ) for src, dest in rename_keys: rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) read_in_q_k_v(__UpperCamelCase , __UpperCamelCase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): __A = state_dict.pop(__UpperCamelCase ) if key.startswith('''decoder''' ) and "output_projection" not in key: __A = val else: __A = val # load state dict model.load_state_dict(__UpperCamelCase ) # Check outputs on an image __A = ViTImageProcessor(size=encoder_config.image_size ) __A = RobertaTokenizer.from_pretrained('''roberta-large''' ) __A = TrOCRProcessor(__UpperCamelCase , __UpperCamelCase ) __A = processor(images=prepare_img(__UpperCamelCase ) , return_tensors='''pt''' ).pixel_values # verify logits __A = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) __A = model(pixel_values=__UpperCamelCase , 
decoder_input_ids=__UpperCamelCase ) __A = outputs.logits __A = torch.Size([1, 1, 5_0_2_6_5] ) if "trocr-base-handwritten" in checkpoint_url: __A = torch.tensor( [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] ) elif "trocr-large-handwritten" in checkpoint_url: __A = torch.tensor( [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] ) elif "trocr-base-printed" in checkpoint_url: __A = torch.tensor( [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] ) elif "trocr-large-printed" in checkpoint_url: __A = torch.tensor( [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :1_0] , __UpperCamelCase , atol=1e-3 ), "First elements of logits not as expected" Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(__UpperCamelCase ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt', type=str, help='URL to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) lowercase_ = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
266
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ): __A = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sgugger/tiny-distilbert-classification''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], 
multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) # set architectures equal to `None` __A = None __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple 
docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) 
self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, save_to_csv=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ), inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ), env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ), multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ): 
self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ), log_print=_lowerCamelCase, trace_memory_line_by_line=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
266
1
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 lowercase_ = data_utils.TransfoXLTokenizer lowercase_ = data_utils.TransfoXLCorpus lowercase_ = data_utils lowercase_ = data_utils def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__UpperCamelCase , '''rb''' ) as fp: __A = pickle.load(__UpperCamelCase , encoding='''latin1''' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) __A = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file'''] print(f'Save vocabulary to {pytorch_vocab_dump_path}' ) __A = corpus.vocab.__dict__ torch.save(__UpperCamelCase , __UpperCamelCase ) __A = corpus.__dict__ corpus_dict_no_vocab.pop('''vocab''' , __UpperCamelCase ) __A = pytorch_dump_folder_path + '''/''' + CORPUS_NAME print(f'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(__UpperCamelCase , __UpperCamelCase ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model __A = os.path.abspath(__UpperCamelCase ) __A = os.path.abspath(__UpperCamelCase ) print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' 
) # Initialise PyTorch model if transfo_xl_config_file == "": __A = TransfoXLConfig() else: __A = TransfoXLConfig.from_json_file(__UpperCamelCase ) print(f'Building PyTorch model from configuration: {config}' ) __A = TransfoXLLMHeadModel(__UpperCamelCase ) __A = load_tf_weights_in_transfo_xl(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Save pytorch-model __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) print(f'Save PyTorch model to {os.path.abspath(__UpperCamelCase )}' ) torch.save(model.state_dict() , __UpperCamelCase ) print(f'Save configuration file to {os.path.abspath(__UpperCamelCase )}' ) with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the folder to store the PyTorch model or dataset/vocab.', ) parser.add_argument( '--tf_checkpoint_path', default='', type=str, help='An optional path to a TensorFlow checkpoint path to be converted.', ) parser.add_argument( '--transfo_xl_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--transfo_xl_dataset_file', default='', type=str, help='An optional dataset file to be converted in a vocabulary.', ) lowercase_ = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
266
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = PegasusTokenizer A_ : int = PegasusTokenizerFast A_ : Optional[Any] = True A_ : Union[str, Any] = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def _SCREAMING_SNAKE_CASE ( self : int, **_lowerCamelCase : List[Any] ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = '''</s>''' __A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ), _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<pad>''' ) self.assertEqual(vocab_keys[1], '''</s>''' ) self.assertEqual(vocab_keys[-1], '''v''' ) self.assertEqual(len(_lowerCamelCase ), 11_03 ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' 
self.assertEqual(self.get_tokenizer().vocab_size, 11_03 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word __A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' __A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_61_03 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_03 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 10_24 __A = '''To ensure a smooth flow of bank resolutions.''' __A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def 
_SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 1_50, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 10_24) assert batch.attention_mask.shape == (2, 10_24) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # fmt: off __A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase, model_name='''google/bigbird-pegasus-large-arxiv''', revision='''ba85d0851d708441f91440d509690f1ab6353415''', ) @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = PegasusTokenizer A_ : Union[str, Any] = PegasusTokenizerFast A_ : Any = True A_ : str = True def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase, offset=0, mask_token_sent=_lowerCamelCase, mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], **_lowerCamelCase : Dict ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ): '''simple docstring''' return ("This 
is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 10_00, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 40_96) assert batch.attention_mask.shape == (2, 40_96) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) __A = self._large_tokenizer(_lowerCamelCase ).input_ids self.assertListEqual( _lowerCamelCase, [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1], )
266
1
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model lowercase_ = '0.12' # assumed parallelism: 8 if is_torch_available(): import torch def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ): """simple docstring""" if rng is None: __A = random.Random() __A = 1 for dim in shape: total_dims *= dim __A = [] for _ in range(__UpperCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) __A = np.array(__UpperCamelCase , dtype=jnp.intaa ).reshape(__UpperCamelCase ) return output def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=None ): """simple docstring""" __A = ids_tensor(__UpperCamelCase , vocab_size=2 , rng=__UpperCamelCase ) # make sure that at least one token is attended to for each batch __A = 1 return attn_mask @require_flax class snake_case : '''simple docstring''' A_ : str = None A_ : Dict = () def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A , __A = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 __A = 2 __A = inputs['''input_ids'''].shape[-1] // 2 __A = inputs['''input_ids'''][:max_batch_size, :sequence_length] __A = jnp.ones_like(_lowerCamelCase ) __A = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens __A = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` __A = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _SCREAMING_SNAKE_CASE ( self : 
Optional[Any] ): '''simple docstring''' __A , __A , __A , __A = self._get_input_ids_and_config() __A = False __A = max_length __A = 0 for model_class in self.all_generative_model_classes: __A = model_class(_lowerCamelCase ) __A = model_class.__name__[4:] # Skip the "Flax" at the beginning __A = getattr(_lowerCamelCase, _lowerCamelCase ) __A = pt_model_class(_lowerCamelCase ).eval() __A = load_flax_weights_in_pytorch_model(_lowerCamelCase, flax_model.params ) __A = flax_model.generate(_lowerCamelCase ).sequences __A = pt_model.generate(torch.tensor(_lowerCamelCase, dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: __A = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A , __A , __A , __A = self._get_input_ids_and_config() __A = False __A = max_length for model_class in self.all_generative_model_classes: __A = model_class(_lowerCamelCase ) __A = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _lowerCamelCase ) __A = jit(model.generate ) __A = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A , __A , __A , __A = self._get_input_ids_and_config() __A = True __A = max_length for model_class in self.all_generative_model_classes: __A = model_class(_lowerCamelCase ) __A = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _lowerCamelCase ) __A = jit(model.generate ) __A = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A , __A , __A , __A = self._get_input_ids_and_config() __A = 
False __A = max_length __A = 2 for model_class in self.all_generative_model_classes: __A = model_class(_lowerCamelCase ) __A = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _lowerCamelCase ) __A = jit(model.generate ) __A = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A , __A , __A , __A = self._get_input_ids_and_config() __A = False __A = max_length __A = 2 __A = 2 for model_class in self.all_generative_model_classes: __A = model_class(_lowerCamelCase ) __A = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A , __A , __A , __A = self._get_input_ids_and_config() __A = True __A = max_length __A = 0.8 __A = 10 __A = 0.3 __A = 1 __A = 8 __A = 9 for model_class in self.all_generative_model_classes: __A = model_class(_lowerCamelCase ) __A = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _lowerCamelCase ) __A = jit(model.generate ) __A = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A , __A , __A , __A = self._get_input_ids_and_config() __A = max_length __A = 1 __A = 8 __A = 9 for model_class in self.all_generative_model_classes: __A = model_class(_lowerCamelCase ) __A = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _lowerCamelCase ) __A = jit(model.generate ) __A = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A , __A , __A , __A = 
self._get_input_ids_and_config() __A = max_length __A = 2 __A = 1 __A = 8 __A = 9 for model_class in self.all_generative_model_classes: __A = model_class(_lowerCamelCase ) __A = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _lowerCamelCase ) __A = jit(model.generate ) __A = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A , __A , __A , __A = self._get_input_ids_and_config() # pad attention mask on the left __A = attention_mask.at[(0, 0)].set(0 ) __A = False __A = max_length for model_class in self.all_generative_model_classes: __A = model_class(_lowerCamelCase ) __A = model.generate(_lowerCamelCase, attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _lowerCamelCase ) __A = jit(model.generate ) __A = jit_generate(_lowerCamelCase, attention_mask=_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A , __A , __A , __A = self._get_input_ids_and_config() # pad attention mask on the left __A = attention_mask.at[(0, 0)].set(0 ) __A = True __A = max_length for model_class in self.all_generative_model_classes: __A = model_class(_lowerCamelCase ) __A = model.generate(_lowerCamelCase, attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _lowerCamelCase ) __A = jit(model.generate ) __A = jit_generate(_lowerCamelCase, attention_mask=_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A , __A , __A , __A = self._get_input_ids_and_config() # pad attention mask on the left __A = attention_mask.at[(0, 0)].set(0 ) __A = 2 __A = max_length for model_class in 
self.all_generative_model_classes: __A = model_class(_lowerCamelCase ) __A = model.generate(_lowerCamelCase, attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1], _lowerCamelCase ) __A = jit(model.generate ) __A = jit_generate(_lowerCamelCase, attention_mask=_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() ) @require_flax class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' ) __A = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) __A = '''Hello world''' __A = tokenizer(_lowerCamelCase, return_tensors='''np''' ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(_lowerCamelCase, '''do_samples''' ): model.generate(_lowerCamelCase, do_samples=_lowerCamelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(_lowerCamelCase, '''foo''' ): __A = {'''foo''': '''bar'''} model.generate(_lowerCamelCase, **_lowerCamelCase )
266
"""simple docstring""" import re def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )] def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = split_input(str_ ) return "".join( [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" try: __A = split_input(__UpperCamelCase ) if upper: __A = ''''''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __A = ''''''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return to_simple_case(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" try: __A = to_simple_case(__UpperCamelCase ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''_''' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''-''' ) if __name__ == "__main__": __import__('doctest').testmod()
266
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { 'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json', 'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json', 'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json', 'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json', 'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json', 'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json', 'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json', 'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json', 'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json', } class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : List[str] = "xmod" def __init__( self : str, _lowerCamelCase : List[str]=3_05_22, _lowerCamelCase : Dict=7_68, _lowerCamelCase : Dict=12, _lowerCamelCase : int=12, _lowerCamelCase : Union[str, Any]=30_72, _lowerCamelCase : Dict="gelu", _lowerCamelCase : Optional[Any]=0.1, _lowerCamelCase : Union[str, Any]=0.1, _lowerCamelCase : Dict=5_12, _lowerCamelCase : str=2, _lowerCamelCase : Union[str, Any]=0.02, _lowerCamelCase : Optional[Any]=1e-12, _lowerCamelCase : Optional[int]=1, _lowerCamelCase : int=0, _lowerCamelCase : Optional[int]=2, _lowerCamelCase : List[str]="absolute", _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : Optional[Any]=False, _lowerCamelCase : 
int=2, _lowerCamelCase : Dict=False, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Optional[int]=("en_XX",), _lowerCamelCase : Dict=None, **_lowerCamelCase : Optional[int], ): '''simple docstring''' super().__init__(pad_token_id=_lowerCamelCase, bos_token_id=_lowerCamelCase, eos_token_id=_lowerCamelCase, **_lowerCamelCase ) __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = hidden_act __A = intermediate_size __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = initializer_range __A = layer_norm_eps __A = position_embedding_type __A = use_cache __A = classifier_dropout __A = pre_norm __A = adapter_reduction_factor __A = adapter_layer_norm __A = adapter_reuse_layer_norm __A = ln_before_adapter __A = list(_lowerCamelCase ) __A = default_language class snake_case ( _lowerCAmelCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' if self.task == "multiple-choice": __A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __A = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
266
"""simple docstring""" from __future__ import annotations class snake_case : '''simple docstring''' def __init__( self : int, _lowerCamelCase : List[Any]=None ): '''simple docstring''' __A = data __A = None def __repr__( self : Union[str, Any] ): '''simple docstring''' __A = [] __A = self while temp: string_rep.append(f'{temp.data}' ) __A = temp.next return "->".join(_lowerCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not elements_list: raise Exception('''The Elements List is empty''' ) __A = __A = Node(elements_list[0] ) for i in range(1 , len(__UpperCamelCase ) ): __A = Node(elements_list[i] ) __A = current.next return head def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def lowerCAmelCase ( ): """simple docstring""" from doctest import testmod testmod() __A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print('''Linked List:''' ) print(__UpperCamelCase ) print('''Elements in Reverse:''' ) print_reverse(__UpperCamelCase ) if __name__ == "__main__": main()
266
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase_ = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer 
try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
266
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : int = ["input_features", "attention_mask"] def __init__( self : Optional[Any], _lowerCamelCase : Union[str, Any]=80, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Any=80, _lowerCamelCase : List[str]=0.0, _lowerCamelCase : int=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]=True, **_lowerCamelCase : List[str], ): '''simple docstring''' super().__init__(feature_size=_lowerCamelCase, sampling_rate=_lowerCamelCase, padding_value=_lowerCamelCase, **_lowerCamelCase ) __A = num_mel_bins __A = do_ceptral_normalize __A = normalize_means __A = normalize_vars __A = True def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : np.ndarray, ): '''simple docstring''' __A = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __A = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 ) __A = ta_kaldi.fbank(_lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray, _lowerCamelCase : int, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : float = 0.0, ): '''simple docstring''' # make sure we normalize float32 arrays if normalize_means: __A = x[:input_length].mean(axis=0 ) __A = np.subtract(_lowerCamelCase, _lowerCamelCase ) if normalize_vars: __A = x[:input_length].std(axis=0 ) __A = np.divide(_lowerCamelCase, _lowerCamelCase ) if input_length < x.shape[0]: __A = padding_value # make sure array is in float32 __A = x.astype(np.floataa ) return x def 
_SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[np.ndarray], _lowerCamelCase : Optional[np.ndarray] = None ): '''simple docstring''' __A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_lowerCamelCase, _lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value ) for x, n in zip(_lowerCamelCase, _lowerCamelCase ) ] def __call__( self : Optional[Any], _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _lowerCamelCase : Union[bool, str, PaddingStrategy] = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : bool = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[bool] = None, **_lowerCamelCase : Optional[Any], ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __A = isinstance(_lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __A = is_batched_numpy or ( isinstance(_lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCamelCase, np.ndarray ): __A = np.asarray(_lowerCamelCase, dtype=np.floataa ) elif isinstance(_lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __A = [raw_speech] # extract fbank features __A = [self._extract_fbank_features(_lowerCamelCase ) for waveform in raw_speech] # convert into correct format for padding __A = BatchFeature({'''input_features''': features} ) __A = self.pad( _lowerCamelCase, padding=_lowerCamelCase, max_length=_lowerCamelCase, truncation=_lowerCamelCase, pad_to_multiple_of=_lowerCamelCase, return_attention_mask=_lowerCamelCase, **_lowerCamelCase, ) # make sure list is in array format __A = padded_inputs.get('''input_features''' ) if isinstance(input_features[0], _lowerCamelCase ): __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for feature in input_features] __A = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: __A = [np.asarray(_lowerCamelCase, dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __A = ( np.array(_lowerCamelCase, dtype=np.intaa ) if self._get_padding_strategies(_lowerCamelCase, max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __A = self.normalize( padded_inputs['''input_features'''], attention_mask=_lowerCamelCase ) if 
return_tensors is not None: __A = padded_inputs.convert_to_tensors(_lowerCamelCase ) return padded_inputs
266
1
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowercase_ = 25_0004 lowercase_ = 25_0020 @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = MBartTokenizer A_ : Tuple = MBartTokenizerFast A_ : Optional[int] = True A_ : Union[str, Any] = True def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = MBartTokenizer(_lowerCamelCase, keep_accents=_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = MBartTokenizer(_lowerCamelCase, keep_accents=_lowerCamelCase ) __A = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]], ) __A = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase, [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ], ) __A = 
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ], ) __A = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase, [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ], ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __A = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __A = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase, **_lowerCamelCase ) __A = self.tokenizer_class.from_pretrained(_lowerCamelCase, **_lowerCamelCase ) __A = tempfile.mkdtemp() __A = tokenizer_r.save_pretrained(_lowerCamelCase ) __A = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __A = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_lowerCamelCase, _lowerCamelCase ) # Checks everything loads correctly in the same way __A = tokenizer_r.from_pretrained(_lowerCamelCase ) __A = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in 
tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowerCamelCase ) # Save tokenizer rust, legacy_format=True __A = tempfile.mkdtemp() __A = tokenizer_r.save_pretrained(_lowerCamelCase, legacy_format=_lowerCamelCase ) __A = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(_lowerCamelCase, _lowerCamelCase ) # Checks everything loads correctly in the same way __A = tokenizer_r.from_pretrained(_lowerCamelCase ) __A = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) ) shutil.rmtree(_lowerCamelCase ) # Save tokenizer rust, legacy_format=False __A = tempfile.mkdtemp() __A = tokenizer_r.save_pretrained(_lowerCamelCase, legacy_format=_lowerCamelCase ) __A = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __A = tokenizer_r.from_pretrained(_lowerCamelCase ) __A = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) ) shutil.rmtree(_lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class snake_case ( unittest.TestCase ): '''simple docstring''' A_ : Union[str, Any] = "facebook/mbart-large-en-ro" A_ : int = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military 
solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] A_ : Optional[int] = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] A_ : Tuple = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE] @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ): '''simple docstring''' __A = MBartTokenizer.from_pretrained( cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' ) __A = 1 return cls def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_00_20 ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' self.assertIn(_lowerCamelCase, self.tokenizer.all_special_ids ) __A = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __A = self.tokenizer.decode(_lowerCamelCase, skip_special_tokens=_lowerCamelCase ) __A = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=_lowerCamelCase ) self.assertEqual(_lowerCamelCase, _lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0], 
_lowerCamelCase ) __A = 10 __A = self.tokenizer(_lowerCamelCase, max_length=_lowerCamelCase, truncation=_lowerCamelCase ).input_ids[0] self.assertEqual(ids[-2], 2 ) self.assertEqual(ids[-1], _lowerCamelCase ) self.assertEqual(len(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_00_26, 25_00_01] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = tempfile.mkdtemp() __A = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowerCamelCase ) __A = MBartTokenizer.from_pretrained(_lowerCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, _lowerCamelCase ) @require_torch def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=_lowerCamelCase, return_tensors='''pt''' ) __A = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=_lowerCamelCase, truncation=_lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', ) __A = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id ) self.assertIsInstance(_lowerCamelCase, _lowerCamelCase ) self.assertEqual((2, 14), batch.input_ids.shape ) self.assertEqual((2, 14), batch.attention_mask.shape ) __A = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, _lowerCamelCase ) self.assertEqual(2, batch.decoder_input_ids[0, 
-1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, [] ) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = self.tokenizer(self.src_text, padding=_lowerCamelCase, truncation=_lowerCamelCase, max_length=3, return_tensors='''pt''' ) __A = self.tokenizer( text_target=self.tgt_text, padding=_lowerCamelCase, truncation=_lowerCamelCase, max_length=10, return_tensors='''pt''' ) __A = targets['''input_ids'''] __A = shift_tokens_right(_lowerCamelCase, self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1], 3 ) self.assertEqual(batch.decoder_input_ids.shape[1], 10 ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self.tokenizer._build_translation_inputs( '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(_lowerCamelCase ), { # A, test, EOS, en_XX '''input_ids''': [[62, 30_34, 2, 25_00_04]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_00_01, }, )
266
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=13, _lowerCamelCase : Any=3, _lowerCamelCase : Optional[int]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Any=[0.5, 0.5, 0.5], _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = do_normalize __A = image_mean __A = image_std def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = ViTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = EfficientFormerImageProcessorTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) 
self.assertTrue(hasattr(_lowerCamelCase, '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''image_std''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = 
image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), )
266
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = {'configuration_timm_backbone': ['TimmBackboneConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ['TimmBackbone'] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
266
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int], *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : Dict ): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''', _lowerCamelCase, ) super().__init__(*_lowerCamelCase, **_lowerCamelCase )
266
1
"""simple docstring""" from math import factorial class snake_case : '''simple docstring''' def __init__( self : Union[str, Any], _lowerCamelCase : str, _lowerCamelCase : int ): '''simple docstring''' __A = real if isinstance(_lowerCamelCase, _lowerCamelCase ): __A = [1] * rank else: __A = rank def __repr__( self : Tuple ): '''simple docstring''' return ( f'{self.real}+' f'{"+".join(str(_lowerCamelCase )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real, _lowerCamelCase ) def __add__( self : Tuple, _lowerCamelCase : Optional[Any] ): '''simple docstring''' if not isinstance(_lowerCamelCase, _lowerCamelCase ): return Dual(self.real + other, self.duals ) __A = self.duals.copy() __A = other.duals.copy() if len(_lowerCamelCase ) > len(_lowerCamelCase ): o_dual.extend([1] * (len(_lowerCamelCase ) - len(_lowerCamelCase )) ) elif len(_lowerCamelCase ) < len(_lowerCamelCase ): s_dual.extend([1] * (len(_lowerCamelCase ) - len(_lowerCamelCase )) ) __A = [] for i in range(len(_lowerCamelCase ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real, _lowerCamelCase ) A_ : str = __add__ def __sub__( self : Dict, _lowerCamelCase : Dict ): '''simple docstring''' return self + other * -1 def __mul__( self : int, _lowerCamelCase : int ): '''simple docstring''' if not isinstance(_lowerCamelCase, _lowerCamelCase ): __A = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other, _lowerCamelCase ) __A = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real, _lowerCamelCase 
) A_ : Tuple = __mul__ def __truediv__( self : int, _lowerCamelCase : List[str] ): '''simple docstring''' if not isinstance(_lowerCamelCase, _lowerCamelCase ): __A = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other, _lowerCamelCase ) raise ValueError def __floordiv__( self : List[Any], _lowerCamelCase : int ): '''simple docstring''' if not isinstance(_lowerCamelCase, _lowerCamelCase ): __A = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other, _lowerCamelCase ) raise ValueError def __pow__( self : Optional[int], _lowerCamelCase : List[str] ): '''simple docstring''' if n < 0 or isinstance(_lowerCamelCase, _lowerCamelCase ): raise ValueError('''power must be a positive integer''' ) if n == 0: return 1 if n == 1: return self __A = self for _ in range(n - 1 ): x *= self return x def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" if not callable(__UpperCamelCase ): raise ValueError('''differentiate() requires a function as input for func''' ) if not isinstance(__UpperCamelCase , (float, int) ): raise ValueError('''differentiate() requires a float as input for position''' ) if not isinstance(__UpperCamelCase , __UpperCamelCase ): raise ValueError('''differentiate() requires an int as input for order''' ) __A = Dual(__UpperCamelCase , 1 ) __A = func(__UpperCamelCase ) if order == 0: return result.real return result.duals[order - 1] * factorial(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return y**2 * y**4 print(differentiate(f, 9, 2))
266
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : int=3, _lowerCamelCase : Optional[Any]=18, _lowerCamelCase : Any=30, _lowerCamelCase : str=4_00, _lowerCamelCase : int=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=True, ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = apply_ocr def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = LayoutLMvaImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''apply_ocr''' ) ) def 
_SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} ) __A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) self.assertIsInstance(encoding.words, _lowerCamelCase ) self.assertIsInstance(encoding.boxes, _lowerCamelCase ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0], 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' # with apply_OCR = True __A = LayoutLMvaImageProcessor() from datasets import load_dataset __A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' ) __A = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) ) 
self.assertEqual(len(encoding.words ), len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', 
'''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 
2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, _lowerCamelCase ) self.assertListEqual(encoding.boxes, _lowerCamelCase ) # with apply_OCR = False __A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
266
1
"""simple docstring""" from __future__ import annotations import math def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = u for i in range(1 , __UpperCamelCase ): __A = temp * (u - i) return temp def lowerCAmelCase ( ): """simple docstring""" __A = int(input('''enter the numbers of values: ''' ) ) __A = [] for _ in range(__UpperCamelCase ): y.append([] ) for i in range(__UpperCamelCase ): for j in range(__UpperCamelCase ): y[i].append(__UpperCamelCase ) __A = 0 print('''enter the values of parameters in a list: ''' ) __A = list(map(__UpperCamelCase , input().split() ) ) print('''enter the values of corresponding parameters: ''' ) for i in range(__UpperCamelCase ): __A = float(input() ) __A = int(input('''enter the value to interpolate: ''' ) ) __A = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , __UpperCamelCase ): for j in range(n - i ): __A = y[j + 1][i - 1] - y[j][i - 1] __A = y[0][0] for i in range(1 , __UpperCamelCase ): summ += (ucal(__UpperCamelCase , __UpperCamelCase ) * y[0][i]) / math.factorial(__UpperCamelCase ) print(f'the value at {value} is {summ}' ) if __name__ == "__main__": main()
266
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class snake_case ( ctypes.Structure ): '''simple docstring''' A_ : List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = False ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25l''' ) sys.stdout.flush() def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = True ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25h''' ) sys.stdout.flush() @contextmanager def lowerCAmelCase ( ): """simple docstring""" try: hide_cursor() yield finally: show_cursor()
266
1
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { 'configuration_informer': [ 'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'InformerForPrediction', 'InformerModel', 'InformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
266
"""simple docstring""" import argparse import struct import unittest class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : bytes ): '''simple docstring''' __A = data # Initialize hash values __A = [ 0X6a_09e_667, 0Xbb_67a_e85, 0X3c_6ef_372, 0Xa5_4ff_53a, 0X51_0e5_27f, 0X9b_056_88c, 0X1f_83d_9ab, 0X5b_e0c_d19, ] # Initialize round constants __A = [ 0X42_8a2_f98, 0X71_374_491, 0Xb5_c0f_bcf, 0Xe9_b5d_ba5, 0X39_56c_25b, 0X59_f11_1f1, 0X92_3f8_2a4, 0Xab_1c5_ed5, 0Xd8_07a_a98, 0X12_835_b01, 0X24_318_5be, 0X55_0c7_dc3, 0X72_be5_d74, 0X80_deb_1fe, 0X9b_dc0_6a7, 0Xc1_9bf_174, 0Xe4_9b6_9c1, 0Xef_be4_786, 0X0f_c19_dc6, 0X24_0ca_1cc, 0X2d_e92_c6f, 0X4a_748_4aa, 0X5c_b0a_9dc, 0X76_f98_8da, 0X98_3e5_152, 0Xa8_31c_66d, 0Xb0_032_7c8, 0Xbf_597_fc7, 0Xc6_e00_bf3, 0Xd5_a79_147, 0X06_ca6_351, 0X14_292_967, 0X27_b70_a85, 0X2e_1b2_138, 0X4d_2c6_dfc, 0X53_380_d13, 0X65_0a7_354, 0X76_6a0_abb, 0X81_c2c_92e, 0X92_722_c85, 0Xa2_bfe_8a1, 0Xa8_1a6_64b, 0Xc2_4b8_b70, 0Xc7_6c5_1a3, 0Xd1_92e_819, 0Xd6_990_624, 0Xf4_0e3_585, 0X10_6aa_070, 0X19_a4c_116, 0X1e_376_c08, 0X27_487_74c, 0X34_b0b_cb5, 0X39_1c0_cb3, 0X4e_d8a_a4a, 0X5b_9cc_a4f, 0X68_2e6_ff3, 0X74_8f8_2ee, 0X78_a56_36f, 0X84_c87_814, 0X8c_c70_208, 0X90_bef_ffa, 0Xa4_506_ceb, 0Xbe_f9a_3f7, 0Xc6_717_8f2, ] __A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : bytes ): '''simple docstring''' __A = b'''\x80''' + (b'''\x00''' * (63 - (len(_lowerCamelCase ) + 8) % 64)) __A = struct.pack('''>Q''', (len(_lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' # Convert into blocks of 64 bytes __A = [ self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data ), 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __A = list(struct.unpack('''>16L''', _lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __A , 
__A , __A , __A , __A , __A , __A , __A = self.hashes for index in range(0, 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __A = ( self.ror(words[index - 15], 7 ) ^ self.ror(words[index - 15], 18 ) ^ (words[index - 15] >> 3) ) __A = ( self.ror(words[index - 2], 17 ) ^ self.ror(words[index - 2], 19 ) ^ (words[index - 2] >> 10) ) __A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression __A = self.ror(_lowerCamelCase, 6 ) ^ self.ror(_lowerCamelCase, 11 ) ^ self.ror(_lowerCamelCase, 25 ) __A = (e & f) ^ ((~e & 0Xff_fff_fff) & g) __A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 __A = self.ror(_lowerCamelCase, 2 ) ^ self.ror(_lowerCamelCase, 13 ) ^ self.ror(_lowerCamelCase, 22 ) __A = (a & b) ^ (a & c) ^ (b & c) __A = (sa + maj) % 0X100_000_000 __A , __A , __A , __A , __A , __A , __A , __A = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) __A = [a, b, c, d, e, f, g, h] # Modify final values __A = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes ) ] __A = ''''''.join([hex(_lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' return 0Xff_fff_fff & (value << (32 - rotations)) | (value >> rotations) class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' import hashlib __A = bytes('''Test String''', '''utf-8''' ) self.assertEqual(SHAaaa(_lowerCamelCase ).hash, hashlib.shaaaa(_lowerCamelCase ).hexdigest() ) def lowerCAmelCase ( ): """simple docstring""" import doctest doctest.testmod() __A = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! 
Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) __A = parser.parse_args() __A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: __A = f.read() else: __A = bytes(__UpperCamelCase , '''utf-8''' ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
266
1
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = "upernet" def __init__( self : str, _lowerCamelCase : List[Any]=None, _lowerCamelCase : Optional[int]=5_12, _lowerCamelCase : int=0.02, _lowerCamelCase : str=[1, 2, 3, 6], _lowerCamelCase : str=True, _lowerCamelCase : Tuple=0.4, _lowerCamelCase : Optional[int]=3_84, _lowerCamelCase : List[str]=2_56, _lowerCamelCase : Tuple=1, _lowerCamelCase : int=False, _lowerCamelCase : Dict=2_55, **_lowerCamelCase : int, ): '''simple docstring''' super().__init__(**_lowerCamelCase ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) __A = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(_lowerCamelCase, _lowerCamelCase ): __A = backbone_config.get('''model_type''' ) __A = CONFIG_MAPPING[backbone_model_type] __A = config_class.from_dict(_lowerCamelCase ) __A = backbone_config __A = hidden_size __A = initializer_range __A = pool_scales __A = use_auxiliary_head __A = auxiliary_loss_weight __A = auxiliary_in_channels __A = auxiliary_channels __A = auxiliary_num_convs __A = auxiliary_concat_input __A = loss_ignore_index def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = copy.deepcopy(self.__dict__ ) __A = self.backbone_config.to_dict() __A = self.__class__.model_type return output
266
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets lowercase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' lowercase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' lowercase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. 
Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ], ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str="auto", _lowerCamelCase : Union[str, Any]=-1, _lowerCamelCase : List[str]=0.9, _lowerCamelCase : int=5, _lowerCamelCase : Tuple=5_00, _lowerCamelCase : Union[str, Any]="gpt2-large", _lowerCamelCase : int=-1, _lowerCamelCase : Union[str, Any]=10_24, _lowerCamelCase : Union[str, Any]=25, _lowerCamelCase : 
str=5, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=25, ): '''simple docstring''' __A = compute_mauve( p_text=_lowerCamelCase, q_text=_lowerCamelCase, p_features=_lowerCamelCase, q_features=_lowerCamelCase, p_tokens=_lowerCamelCase, q_tokens=_lowerCamelCase, num_buckets=_lowerCamelCase, pca_max_data=_lowerCamelCase, kmeans_explained_var=_lowerCamelCase, kmeans_num_redo=_lowerCamelCase, kmeans_max_iter=_lowerCamelCase, featurize_model_name=_lowerCamelCase, device_id=_lowerCamelCase, max_text_length=_lowerCamelCase, divergence_curve_discretization_size=_lowerCamelCase, mauve_scaling_factor=_lowerCamelCase, verbose=_lowerCamelCase, seed=_lowerCamelCase, ) return out
266
1
"""simple docstring""" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = [0] * len(__UpperCamelCase ) for i in range(1 , len(__UpperCamelCase ) ): # use last results for better performance - dynamic programming __A = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: __A = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 __A = j return prefix_result def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return max(prefix_function(__UpperCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod()
266
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowercase_ = imread(R'digital_image_processing/image_data/lena_small.jpg') lowercase_ = cvtColor(img, COLOR_BGR2GRAY) def lowerCAmelCase ( ): """simple docstring""" __A = cn.convert_to_negative(__UpperCamelCase ) # assert negative_img array for at least one True assert negative_img.any() def lowerCAmelCase ( ): """simple docstring""" with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(__UpperCamelCase , 1_1_0 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def lowerCAmelCase ( ): """simple docstring""" __A = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCAmelCase ( ): """simple docstring""" __A = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() __A = canny.canny(__UpperCamelCase ) # assert canny array for at least one True assert canny_array.any() def lowerCAmelCase ( ): """simple docstring""" assert gg.gaussian_filter(__UpperCamelCase , 5 , sigma=0.9 ).all() def lowerCAmelCase ( ): """simple docstring""" 
__A = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __A = conv.img_convolve(__UpperCamelCase , __UpperCamelCase ).astype(__UpperCamelCase ) assert res.any() def lowerCAmelCase ( ): """simple docstring""" assert med.median_filter(__UpperCamelCase , 3 ).any() def lowerCAmelCase ( ): """simple docstring""" __A , __A = sob.sobel_filter(__UpperCamelCase ) assert grad.any() and theta.any() def lowerCAmelCase ( ): """simple docstring""" __A = sp.make_sepia(__UpperCamelCase , 2_0 ) assert sepia.all() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ): """simple docstring""" __A = bs.Burkes(imread(__UpperCamelCase , 1 ) , 1_2_0 ) burkes.process() assert burkes.output_img.any() def lowerCAmelCase ( __UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ): """simple docstring""" __A = rs.NearestNeighbour(imread(__UpperCamelCase , 1 ) , 4_0_0 , 2_0_0 ) nn.process() assert nn.output.any() def lowerCAmelCase ( ): """simple docstring""" __A = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. __A = imread(__UpperCamelCase , 0 ) # Test for get_neighbors_pixel function() return not None __A = 0 __A = 0 __A = image[x_coordinate][y_coordinate] __A = lbp.get_neighbors_pixel( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __A = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __A = lbp.local_binary_value(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) assert lbp_image.any()
266
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'} class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Dict = "ctrl" A_ : Tuple = ["past_key_values"] A_ : List[Any] = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Union[str, Any], _lowerCamelCase : List[str]=24_65_34, _lowerCamelCase : Dict=2_56, _lowerCamelCase : Dict=12_80, _lowerCamelCase : int=81_92, _lowerCamelCase : Optional[int]=48, _lowerCamelCase : Tuple=16, _lowerCamelCase : Tuple=0.1, _lowerCamelCase : Optional[Any]=0.1, _lowerCamelCase : Dict=1e-6, _lowerCamelCase : List[Any]=0.02, _lowerCamelCase : Tuple=True, **_lowerCamelCase : Union[str, Any], ): '''simple docstring''' __A = vocab_size __A = n_positions __A = n_embd __A = n_layer __A = n_head __A = dff __A = resid_pdrop __A = embd_pdrop __A = layer_norm_epsilon __A = initializer_range __A = use_cache super().__init__(**_lowerCamelCase )
266
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowercase_ = random.Random() if is_torch_available(): import torch def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None ): """simple docstring""" if rng is None: __A = global_rng __A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any, _lowerCamelCase : List[str], _lowerCamelCase : Any=7, _lowerCamelCase : Optional[int]=4_00, _lowerCamelCase : Optional[int]=20_00, _lowerCamelCase : Dict=1, _lowerCamelCase : Optional[Any]=0.0, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Dict=True, ): '''simple docstring''' __A = parent __A = batch_size __A = min_seq_length __A = max_seq_length __A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __A = feature_size __A = padding_value __A = sampling_rate __A = return_attention_mask __A = do_normalize def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[Any]=False, _lowerCamelCase : int=False ): '''simple docstring''' def _flatten(_lowerCamelCase : List[str] ): return list(itertools.chain(*_lowerCamelCase ) ) if equal_length: __A = floats_list((self.batch_size, self.max_seq_length) ) else: # make 
sure that inputs increase in size __A = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: __A = [np.asarray(_lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : int = ASTFeatureExtractor def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ASTFeatureExtractionTester(self ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' # Tests that all call wrap to encode_plus and batch_encode_plus __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __A = [floats_list((1, x) )[0] for x in range(8_00, 14_00, 2_00 )] __A = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __A = feat_extract(speech_inputs[0], return_tensors='''np''' ).input_values __A = feat_extract(np_speech_inputs[0], return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test batched __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__A = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __A = np.asarray(_lowerCamelCase ) __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values __A = feat_extract(_lowerCamelCase, return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_lowerCamelCase, _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase, _lowerCamelCase, atol=1e-3 ) ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' import torch __A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __A = np.random.rand(1_00 ).astype(np.floataa ) __A = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __A = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' from datasets import load_dataset __A = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech __A = ds.sort('''id''' ).select(range(_lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # fmt: off __A = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on __A = self._load_datasamples(1 ) __A = ASTFeatureExtractor() __A = feature_extractor(_lowerCamelCase, return_tensors='''pt''' 
).input_values self.assertEquals(input_values.shape, (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30], _lowerCamelCase, atol=1e-4 ) )
266
1
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Union[List[PIL.Image.Image], np.ndarray] A_ : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import 
StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version('>=', '0.0.12') ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import 
OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : np.ndarray A_ : List[bool] from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
266
"""Solve a system of n simultaneous linear equations.

Each equation is a list of n+1 numbers: n coefficients followed by the
constant term.

Fixed from a machine-mangled copy: both functions had been renamed to the
same placeholder identifier (the second shadowing the first), while the
recursive call still targeted ``simplify`` and the demo targeted
``solve_simultaneous`` / ``eq`` — every execution path raised ``NameError``.
The original names required by the call sites are restored below.
"""


def simplify(current_set: list[list]) -> list[list]:
    """Perform one round of Gaussian elimination and recurse.

    Normalises every row by its leading coefficient, subtracts the first row
    from the others to cancel the leading term, then recurses on the reduced
    subsystem until rows have length 3 (two coefficients + constant).

    NOTE(review): the copy taken below is shallow, so the caller's inner row
    lists are normalised in place.
    """
    # Divide each row by the magnitude of its first term --> "unit" rows.
    duplicated_set = current_set.copy()
    for row_index, row in enumerate(duplicated_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicated_set[row_index][column_index] = column
                continue
            duplicated_set[row_index][column_index] = column / magnitude
    # Subtract the first row from every other row to cancel its leading term.
    first_row = duplicated_set[0]
    final_set = [first_row]
    current_set = duplicated_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in the form we want, so preserve it.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create the next recursion iteration set (drop the cancelled column).
    if len(final_set[0]) != 3:
        current_set = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        # Re-attach the first column so row shapes line up again.
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_set)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Return the solution vector of *equations*, rounded to 5 decimals.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]

    :raises IndexError: if the rows do not form an n x (n+1) system
    :raises ValueError: if a coefficient is not an int/float, or no
        zero-free row exists to lead the elimination
    """
    if len(equations) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Elimination needs a zero-free pivot row at the front; move one there.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last (single-variable) row upwards.
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
266
1
"""simple docstring""" from __future__ import annotations class snake_case : '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : str, _lowerCamelCase : str ): '''simple docstring''' __A , __A = text, pattern __A , __A = len(_lowerCamelCase ), len(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str ): '''simple docstring''' for i in range(self.patLen - 1, -1, -1 ): if char == self.pattern[i]: return i return -1 def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : int ): '''simple docstring''' for i in range(self.patLen - 1, -1, -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' # searches pattern in text and returns index positions __A = [] for i in range(self.textLen - self.patLen + 1 ): __A = self.mismatch_in_text(_lowerCamelCase ) if mismatch_index == -1: positions.append(_lowerCamelCase ) else: __A = self.match_in_pattern(self.text[mismatch_index] ) __A = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions lowercase_ = 'ABAABA' lowercase_ = 'AB' lowercase_ = BoyerMooreSearch(text, pattern) lowercase_ = bms.bad_character_heuristic() if len(positions) == 0: print('No match found') else: print('Pattern found in following positions: ') print(positions)
266
"""simple docstring""" from __future__ import annotations from typing import Any def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not postfix_notation: return 0 __A = {'''+''', '''-''', '''*''', '''/'''} __A = [] for token in postfix_notation: if token in operations: __A , __A = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(__UpperCamelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
266
1
"""Tests for the ``diffusers`` DanceDiffusion audio pipeline.

NOTE(review): this module comes from a machine-mangled dump — class names are
``snake_case``, every test method is ``_SCREAMING_SNAKE_CASE`` (later
definitions shadow earlier ones inside each class), assignment targets are
all collapsed to ``__A`` and then referenced by their original names
(``unet``, ``pipe``, ``audio`` ...), and a base-class reference
``_lowerCAmelCase`` is undefined.  It is NOT runnable as-is; code tokens are
preserved byte-for-byte, only comments/docstrings were added.
"""
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class snake_case ( _lowerCAmelCase , unittest.TestCase ):
    """Fast (CPU) checks of the DanceDiffusion pipeline via the common
    pipeline tester mixin.  NOTE(review): the first base class is the
    undefined placeholder ``_lowerCAmelCase``."""

    A_ : Dict = DanceDiffusionPipeline
    A_ : List[Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    # Optional params the audio pipeline does not accept.
    A_ : int = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    A_ : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    A_ : Dict = False
    A_ : Optional[int] = False

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        '''Build the minimal unet + scheduler components for a dummy pipeline.'''
        torch.manual_seed(0 )
        __A = UNetaDModel(
            block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=5_12, sample_rate=1_60_00, in_channels=2, out_channels=2, flip_sin_to_cos=_lowerCamelCase, use_timestep_embedding=_lowerCamelCase, time_embedding_type='''fourier''', mid_block_type='''UNetMidBlock1D''', down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D'''), up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip'''), )
        __A = IPNDMScheduler()
        __A = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Optional[Any], _lowerCamelCase : str=0 ):
        '''Build deterministic dummy pipeline inputs (seeded generator).'''
        if str(_lowerCamelCase ).startswith('''mps''' ):
            __A = torch.manual_seed(_lowerCamelCase )
        else:
            __A = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
        __A = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 4,
        }
        return inputs

    def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        '''Run the dummy pipeline on CPU and compare an audio slice to a
        hard-coded expectation.'''
        __A = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __A = self.get_dummy_components()
        __A = DanceDiffusionPipeline(**_lowerCamelCase )
        __A = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        __A = self.get_dummy_inputs(_lowerCamelCase )
        __A = pipe(**_lowerCamelCase )
        __A = output.audios
        __A = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        __A = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    # The following overrides delegate to the mixin but are skipped on MPS.
    @skip_mps
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        '''simple docstring'''
        return super().test_save_load_local()

    @skip_mps
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        '''simple docstring'''
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    @skip_mps
    def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        '''simple docstring'''
        return super().test_save_load_optional_components()

    @skip_mps
    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        '''simple docstring'''
        return super().test_attention_slicing_forward_pass()

    def _SCREAMING_SNAKE_CASE ( self : str ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )


@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Slow GPU integration tests against the ``harmonai/maestro-150k``
    checkpoint (fp32 and fp16 variants)."""

    def _SCREAMING_SNAKE_CASE ( self : Any ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        '''Full-precision generation; compares an audio slice to a
        hard-coded expectation.'''
        __A = torch_device
        __A = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
        __A = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        __A = torch.manual_seed(0 )
        __A = pipe(generator=_lowerCamelCase, num_inference_steps=1_00, audio_length_in_s=4.0_96 )
        __A = output.audios
        __A = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        __A = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    def _SCREAMING_SNAKE_CASE ( self : Any ):
        '''Same generation in half precision (``torch.floataa`` is the
        mangled dtype token — presumably float16; TODO confirm upstream).'''
        __A = torch_device
        __A = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''', torch_dtype=torch.floataa )
        __A = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        __A = torch.manual_seed(0 )
        __A = pipe(generator=_lowerCamelCase, num_inference_steps=1_00, audio_length_in_s=4.0_96 )
        __A = output.audios
        __A = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        __A = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
266
"""Tests for the Flax RoFormer models.

NOTE(review): machine-mangled dump — classes are named ``snake_case``,
methods ``_SCREAMING_SNAKE_CASE``, assignment targets collapsed to ``__A``
and then referenced by their original names, and the tester base class
``_lowerCAmelCase`` is undefined.  Not runnable as-is; code tokens are
preserved byte-for-byte, only comments/docstrings were added.
"""
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class snake_case ( unittest.TestCase ):
    """Model tester: stores a small model configuration and builds random
    config/inputs for the common Flax test suite."""

    def __init__( self : Optional[Any], _lowerCamelCase : Tuple, _lowerCamelCase : List[str]=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : int=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : int=99, _lowerCamelCase : Optional[int]=32, _lowerCamelCase : Tuple=5, _lowerCamelCase : Tuple=4, _lowerCamelCase : str=37, _lowerCamelCase : Union[str, Any]="gelu", _lowerCamelCase : int=0.1, _lowerCamelCase : List[Any]=0.1, _lowerCamelCase : Dict=5_12, _lowerCamelCase : List[Any]=16, _lowerCamelCase : Any=2, _lowerCamelCase : Any=0.02, _lowerCamelCase : Dict=4, ):
        '''Store the (small) test-model hyperparameters.  NOTE(review): all
        parameters share the mangled name ``_lowerCamelCase``; the attribute
        names below show the intended parameter order.'''
        __A = parent
        __A = batch_size
        __A = seq_length
        __A = is_training
        __A = use_attention_mask
        __A = use_token_type_ids
        __A = use_labels
        __A = vocab_size
        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = intermediate_size
        __A = hidden_act
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = max_position_embeddings
        __A = type_vocab_size
        __A = type_sequence_label_size
        __A = initializer_range
        __A = num_choices

    def _SCREAMING_SNAKE_CASE ( self : Any ):
        '''Build a random (config, input_ids, token_type_ids, attention_mask)
        tuple from the stored hyperparameters.'''
        __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        __A = None
        if self.use_attention_mask:
            __A = random_attention_mask([self.batch_size, self.seq_length] )
        __A = None
        if self.use_token_type_ids:
            __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        __A = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        '''Repackage prepare_config_and_inputs() as (config, inputs_dict).'''
        __A = self.prepare_config_and_inputs()
        __A , __A , __A , __A = config_and_inputs
        __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict


@require_flax
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
    """Common-suite test case wiring the tester above into the (mangled,
    undefined) ``_lowerCAmelCase`` mixin base."""

    A_ : Dict = True
    A_ : Tuple = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        '''setUp: instantiate the model tester.'''
        __A = FlaxRoFormerModelTester(self )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Any ):
        '''Smoke-test loading every model class from the hub checkpoint.'''
        for model_class_name in self.all_model_classes:
            __A = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''', from_pt=_lowerCamelCase )
            __A = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_lowerCamelCase )


@require_flax
class snake_case ( unittest.TestCase ):
    """Slow integration test against a hub checkpoint with hard-coded
    expected logits."""

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        '''Masked-LM forward pass; compares a logits slice to a hard-coded
        expectation.'''
        __A = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        __A = jnp.array([[0, 1, 2, 3, 4, 5]] )
        __A = model(_lowerCamelCase )[0]
        __A = 5_00_00
        __A = (1, 6, vocab_size)
        self.assertEqual(output.shape, _lowerCamelCase )
        __A = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
266
1
"""Evaluate a postfix expression while printing a step-by-step trace table.

Fixed from a machine-mangled copy: the solver had been renamed to a
placeholder while the ``__main__`` block still called ``solve(Postfix)``,
and the intermediate stack pops were discarded into a single throwaway
name, leaving ``a``/``b``/``stack``/``opr`` undefined.
"""
import operator as op


def solve(post_fix):
    """Evaluate *post_fix* (a list of tokens; operands must be non-negative
    integers, since ``str.isdigit`` classifies them) and return the result.

    Prints a table showing every push/pop performed on the stack.
    Division truncates toward zero via ``int(x / y)``.
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation

    # print table header
    print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep=' | ')
    print('-' * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('push(' + x + ')').ljust(12), ','.join(stack), sep=' | ')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + b + ')').ljust(12), ','.join(stack), sep=' | ')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + a + ')').ljust(12), ','.join(stack), sep=' | ')
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(
                x.rjust(8),
                ('push(' + a + x + b + ')').ljust(12),
                ','.join(stack),
                sep=' | ',
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
266
"""Project Euler problem 174: https://projecteuler.net/problem=174

Hollow square laminae: count the tile totals t <= t_limit that can be formed
as between 1 and n_limit distinct hollow square laminae.

Fixed from a machine-mangled copy: both parameters shared one placeholder
name (a SyntaxError), the counter's default factory was an undefined name,
the ``n_limit`` bound was hard-coded instead of using the parameter, and the
``__main__`` block called an undefined ``solution``.
"""
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Return how many t <= t_limit admit between 1 and n_limit laminae.

    A lamina of outer width w with a centred hole of width h (same parity,
    h <= w - 2) uses w*w - h*h tiles.
    """
    # count[t] = number of distinct laminae using exactly t tiles.
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole that keeps the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # Hole and outer widths must share parity for a centred hole.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
266
1
"""Tests for the Nezha PyTorch models.

NOTE(review): machine-mangled dump — classes are named ``snake_case``,
methods ``_SCREAMING_SNAKE_CASE`` (later definitions shadow earlier ones
within each class), every assignment target is collapsed to ``__A`` and then
referenced by its original name, and the mixin bases ``_lowerCAmelCase`` are
undefined.  Not runnable as-is; code tokens are preserved byte-for-byte,
only comments/docstrings were added.
"""
import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class snake_case :
    """Model tester: builds small random configs/inputs and runs shape checks
    for every Nezha head."""

    def __init__( self : List[str], _lowerCamelCase : List[Any], _lowerCamelCase : List[str]=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : str=True, _lowerCamelCase : Dict=True, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Dict=True, _lowerCamelCase : Dict=99, _lowerCamelCase : Optional[int]=32, _lowerCamelCase : List[Any]=5, _lowerCamelCase : Optional[int]=4, _lowerCamelCase : Union[str, Any]=37, _lowerCamelCase : Tuple="gelu", _lowerCamelCase : Any=0.1, _lowerCamelCase : int=0.1, _lowerCamelCase : Optional[int]=1_28, _lowerCamelCase : Any=32, _lowerCamelCase : List[str]=16, _lowerCamelCase : str=2, _lowerCamelCase : List[str]=0.02, _lowerCamelCase : Any=3, _lowerCamelCase : Any=4, _lowerCamelCase : Dict=None, ):
        '''Store the (small) test-model hyperparameters.  NOTE(review): all
        parameters share the mangled name ``_lowerCamelCase``; the attribute
        names below show the intended parameter order.'''
        __A = parent
        __A = batch_size
        __A = seq_length
        __A = is_training
        __A = use_input_mask
        __A = use_token_type_ids
        __A = use_labels
        __A = vocab_size
        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = intermediate_size
        __A = hidden_act
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = max_position_embeddings
        __A = type_vocab_size
        __A = type_sequence_label_size
        __A = initializer_range
        __A = num_labels
        __A = num_choices
        __A = scope

    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        '''Build random (config, ids, masks, labels) inputs.'''
        __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        __A = None
        if self.use_input_mask:
            __A = random_attention_mask([self.batch_size, self.seq_length] )
        __A = None
        if self.use_token_type_ids:
            __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        __A = None
        __A = None
        __A = None
        if self.use_labels:
            __A = ids_tensor([self.batch_size], self.type_sequence_label_size )
            __A = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            __A = ids_tensor([self.batch_size], self.num_choices )
        __A = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        '''Build a NezhaConfig from the stored hyperparameters.'''
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        '''Extend the common inputs with encoder hidden states/mask for
        decoder (cross-attention) tests.  NOTE(review): the tuple target
        repeats the mangled name ``__A``, so the unpack discards values.'''
        (
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
        ) = self.prepare_config_and_inputs()
        __A = True
        __A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __A = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : Any, _lowerCamelCase : str, _lowerCamelCase : Optional[int], _lowerCamelCase : Union[str, Any], _lowerCamelCase : Optional[int], _lowerCamelCase : Union[str, Any], _lowerCamelCase : Union[str, Any] ):
        '''Shape-check the bare NezhaModel forward pass.'''
        __A = NezhaModel(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase )
        __A = model(_lowerCamelCase, token_type_ids=_lowerCamelCase )
        __A = model(_lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )

    def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Tuple, _lowerCamelCase : List[Any], _lowerCamelCase : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any], _lowerCamelCase : Any, _lowerCamelCase : int, _lowerCamelCase : str, _lowerCamelCase : int, ):
        '''Shape-check NezhaModel used as a decoder with cross-attention.'''
        __A = True
        __A = NezhaModel(_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        __A = model(
            _lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, encoder_hidden_states=_lowerCamelCase, encoder_attention_mask=_lowerCamelCase, )
        __A = model(
            _lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, encoder_hidden_states=_lowerCamelCase, )
        __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )

    def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : Any, _lowerCamelCase : Optional[int], _lowerCamelCase : Optional[Any], _lowerCamelCase : str, _lowerCamelCase : str, _lowerCamelCase : Tuple, _lowerCamelCase : Optional[Any] ):
        '''Shape-check the masked-LM head.'''
        __A = NezhaForMaskedLM(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : Optional[Any], _lowerCamelCase : str, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Dict, _lowerCamelCase : str ):
        '''Shape-check the next-sentence-prediction head.'''
        __A = NezhaForNextSentencePrediction(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        __A = model(
            _lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict, _lowerCamelCase : Optional[int], _lowerCamelCase : List[Any], _lowerCamelCase : Tuple, _lowerCamelCase : str, _lowerCamelCase : str, _lowerCamelCase : Tuple ):
        '''Shape-check the pretraining (MLM + NSP) head.'''
        __A = NezhaForPreTraining(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        __A = model(
            _lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase, next_sentence_label=_lowerCamelCase, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )

    def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[Any], _lowerCamelCase : Dict, _lowerCamelCase : Dict, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Any, _lowerCamelCase : List[str], _lowerCamelCase : Any ):
        '''Shape-check the question-answering head.'''
        __A = NezhaForQuestionAnswering(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        __A = model(
            _lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, start_positions=_lowerCamelCase, end_positions=_lowerCamelCase, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Any, _lowerCamelCase : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[Any], _lowerCamelCase : List[Any], _lowerCamelCase : List[Any] ):
        '''Shape-check the sequence-classification head.'''
        __A = self.num_labels
        __A = NezhaForSequenceClassification(_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Optional[int], _lowerCamelCase : Optional[Any], _lowerCamelCase : int, _lowerCamelCase : Tuple, _lowerCamelCase : str, _lowerCamelCase : str, _lowerCamelCase : List[Any] ):
        '''Shape-check the token-classification head.'''
        __A = self.num_labels
        __A = NezhaForTokenClassification(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )

    def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : Tuple, _lowerCamelCase : Optional[Any], _lowerCamelCase : Tuple, _lowerCamelCase : Dict, _lowerCamelCase : List[str] ):
        '''Shape-check the multiple-choice head (inputs expanded per choice).'''
        __A = self.num_choices
        __A = NezhaForMultipleChoice(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        __A = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        __A = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        __A = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        __A = model(
            _lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, labels=_lowerCamelCase, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        '''Repackage prepare_config_and_inputs() as (config, inputs_dict).'''
        __A = self.prepare_config_and_inputs()
        (
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
        ) = config_and_inputs
        __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class snake_case ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common-suite test case.  NOTE(review): the three mixin bases are the
    undefined placeholder ``_lowerCAmelCase``."""

    A_ : List[Any] = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    A_ : Optional[int] = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    A_ : List[Any] = True

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Optional[Any], _lowerCamelCase : Dict, _lowerCamelCase : str=False ):
        '''Add zero dummy labels for pretraining-mapped classes when the
        common suite requests labelled inputs.'''
        __A = super()._prepare_for_class(_lowerCamelCase, _lowerCamelCase, return_labels=_lowerCamelCase )
        if return_labels:
            if model_class in get_values(_lowerCamelCase ):
                __A = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=_lowerCamelCase )
                __A = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=_lowerCamelCase )
        return inputs_dict

    def _SCREAMING_SNAKE_CASE ( self : Any ):
        '''setUp: instantiate the model tester and the config tester.'''
        __A = NezhaModelTester(self )
        __A = ConfigTester(self, config_class=_lowerCamelCase, hidden_size=37 )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def _SCREAMING_SNAKE_CASE ( self : str ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Any ):
        # This regression test was failing with PyTorch < 1.3
        (
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
            ( __A ) ,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        __A = None
        self.model_tester.create_and_check_model_as_decoder(
            _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Dict ):
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        '''Smoke-test from_pretrained on the first hub checkpoint.'''
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A = NezhaModel.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )

    @slow
    @require_torch_gpu
    def _SCREAMING_SNAKE_CASE ( self : int ):
        '''TorchScript round-trip: trace, save, reload, and re-run each model.'''
        __A , __A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            __A = True
            __A = model_class(config=_lowerCamelCase )
            __A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase )
            __A = torch.jit.trace(
                _lowerCamelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(_lowerCamelCase, os.path.join(_lowerCamelCase, '''bert.pt''' ) )
                __A = torch.jit.load(os.path.join(_lowerCamelCase, '''bert.pt''' ), map_location=_lowerCamelCase )
                loaded(inputs_dict['''input_ids'''].to(_lowerCamelCase ), inputs_dict['''attention_mask'''].to(_lowerCamelCase ) )


@require_torch
class snake_case ( unittest.TestCase ):
    """Slow integration tests against the ``sijunhe/nezha-cn-base``
    checkpoint with hard-coded expected tensors."""

    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        '''Bare-model forward pass; compares a hidden-state slice.'''
        __A = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
        __A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        __A = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase )[0]
        __A = torch.Size((1, 6, 7_68) )
        self.assertEqual(output.shape, _lowerCamelCase )
        __A = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], _lowerCamelCase, atol=1e-4 ) )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : str ):
        '''Masked-LM forward pass; compares a logits slice.'''
        __A = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
        __A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        __A = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            __A = model(_lowerCamelCase, attention_mask=_lowerCamelCase )[0]
        __A = torch.Size((1, 6, 2_11_28) )
        self.assertEqual(output.shape, _lowerCamelCase )
        __A = torch.tensor(
            [[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], _lowerCamelCase, atol=1e-4 ) )
266
"""Regression dataset/model helpers and MRPC dataloader builder for
``accelerate`` tests.

NOTE(review): machine-mangled dump — two classes are both named
``snake_case`` (the second shadows the first torch.nn.Module), assignment
targets are all ``__A`` and then referenced by their original names
(``rng``, ``a``, ``b``, ``tokenizer`` ...), and the dataloader function
declares BOTH parameters as ``__UpperCamelCase``, which is a SyntaxError —
the module cannot be imported as-is.  Code tokens are preserved
byte-for-byte; only comments/docstrings were added.
"""
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class snake_case :
    """Synthetic linear-regression dataset: y = a*x + b + noise.
    NOTE(review): ``np.floataa`` is a mangled dtype token — presumably
    float32; confirm against the upstream accelerate source."""

    def __init__( self : Optional[int], _lowerCamelCase : Optional[int]=2, _lowerCamelCase : Optional[int]=3, _lowerCamelCase : int=64, _lowerCamelCase : List[str]=None ):
        '''Draw ``length`` seeded samples of (x, y).'''
        __A = np.random.default_rng(_lowerCamelCase )
        __A = length
        __A = rng.normal(size=(length,) ).astype(np.floataa )
        __A = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa )

    def __len__( self : str ):
        '''simple docstring'''
        return self.length

    def __getitem__( self : Dict, _lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        return {"x": self.x[i], "y": self.y[i]}


class snake_case ( torch.nn.Module ):
    """Tiny regression module with 2-element parameter vectors; uses only
    the first element of each in forward."""

    def __init__( self : Optional[Any], _lowerCamelCase : Tuple=0, _lowerCamelCase : Any=0, _lowerCamelCase : Optional[Any]=False ):
        '''simple docstring'''
        super().__init__()
        __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        __A = True

    def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[Any]=None ):
        '''Forward: a[0]*x + b[0]; logs dtypes once on the first batch.'''
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            __A = False
        return x * self.a[0] + self.b[0]


class snake_case ( torch.nn.Module ):
    """Scalar regression module: a*x + b.  NOTE(review): same mangled class
    name as above — this definition shadows the previous one."""

    def __init__( self : str, _lowerCamelCase : Optional[Any]=0, _lowerCamelCase : Any=0, _lowerCamelCase : List[Any]=False ):
        '''simple docstring'''
        super().__init__()
        __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() )
        __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() )
        __A = True

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str]=None ):
        '''Forward: a*x + b; logs dtypes once on the first batch.'''
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            __A = False
        return x * self.a + self.b


# NOTE(review): both parameters below share the mangled name
# ``__UpperCamelCase`` — duplicate argument names are a SyntaxError.
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1_6 ):
    """Build train/eval MRPC dataloaders: tokenize the CSV splits with a
    BERT tokenizer and pad per-batch (fixed length on TPU)."""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    __A = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    __A = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    __A = load_dataset('''csv''' , data_files=__UpperCamelCase )
    __A = datasets['''train'''].unique('''label''' )
    __A = {v: i for i, v in enumerate(__UpperCamelCase )}

    def tokenize_function(__UpperCamelCase ):
        # max_length=None => use the model max length (it's actually the default)
        __A = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' )
        if "label" in examples:
            __A = [label_to_id[l] for l in examples['''label''']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    __A = datasets.map(
        __UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )

    def collate_fn(__UpperCamelCase ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
        return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' )

    # Instantiate dataloaders.
    __A = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=2 )
    __A = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=1 )

    return train_dataloader, eval_dataloader
266
1
"""simple docstring"""
import argparse
from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging

lowercase_ = logging.get_logger(__name__)
logging.set_verbosity_info()


def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
    '''Copy weights from an old-structure (XLM)ProphetNet checkpoint into the
    new transformers model layout, then save the converted model.

    NOTE(review): obfuscation rewrote assignment targets to `__A`; reads of
    `prophetnet_checkpoint_path`, `loading_info`, `prophet`, `prophet_old`,
    `mapping`, `special_keys`, `attributes`, `model`, `old_model`,
    `is_key_init`, `embed_dim`, `param`, `old_attribute`,
    `pytorch_dump_folder_path` reveal the original names.
    '''
    # XLM checkpoints are detected by their path; both branches load the old
    # model and the new model with loading diagnostics (missing_keys).
    if "xprophetnet" in prophetnet_checkpoint_path:
        __A = XLMProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
        __A , __A = XLMProphetNetForConditionalGeneration.from_pretrained(
            __UpperCamelCase , output_loading_info=__UpperCamelCase )
    else:
        __A = ProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
        __A , __A = ProphetNetForConditionalGeneration.from_pretrained(
            __UpperCamelCase , output_loading_info=__UpperCamelCase )

    # Projections stored fused as in_proj_weight/bias in the old model.
    __A = ['''key_proj''', '''value_proj''', '''query_proj''']
    # New-name -> old-name attribute translation table.
    __A = {
        '''self_attn''': '''ngram_self_attn''',
        '''cross_attn''': '''encoder_attn''',
        '''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
        '''feed_forward_layer_norm''': '''final_layer_norm''',
        '''feed_forward''': '''''',
        '''intermediate''': '''fc1''',
        '''output''': '''fc2''',
        '''key_proj''': '''k_proj''',
        '''query_proj''': '''q_proj''',
        '''value_proj''': '''v_proj''',
        '''word_embeddings''': '''embed_tokens''',
        '''embeddings_layer_norm''': '''emb_layer_norm''',
        '''relative_pos_embeddings''': '''relative_linear''',
        '''ngram_embeddings''': '''ngram_input_embed''',
        '''position_embeddings''': '''embed_positions''',
    }
    # Walk every key the new model could not load and pull it from the old one.
    for key in loading_info["missing_keys"]:
        __A = key.split('''.''' )
        if attributes[0] == "lm_head":
            __A = prophet
            __A = prophet_old
        else:
            __A = prophet.prophetnet
            __A = prophet_old.model
        __A = False
        for attribute in attributes:
            if attribute in mapping:
                __A = mapping[attribute]
            if not hasattr(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) > 0:
                __A = attribute
            elif hasattr(__UpperCamelCase , __UpperCamelCase ):
                __A = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                __A = old_model.weight
                logger.info(f'{attribute} is initialized.' )
                __A = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                __A = old_model.bias
                logger.info(f'{attribute} is initialized' )
                __A = True
                break
            elif attribute in special_keys and hasattr(__UpperCamelCase , '''in_proj_weight''' ):
                # Old model fuses q/k/v into one (3*dim, dim) matrix; slice out
                # the third that corresponds to this projection.
                __A = old_model.in_proj_weight.shape[0] // 3
                __A = getattr(__UpperCamelCase , __UpperCamelCase )
                # NOTE(review): the next two lines build bare (bool, str) tuples
                # and do nothing — the `assert` keyword is missing, so these
                # shape checks are silently skipped.  Defect to fix upstream.
                param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    __A = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    __A = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    __A = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    __A = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    __A = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    __A = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                __A = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
                __A = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
                __A = True
                break
            # Descend one level: numeric components index into ModuleLists.
            if attribute.isdigit():
                __A = model[int(__UpperCamelCase )]
                __A = old_model[int(__UpperCamelCase )]
            else:
                __A = getattr(__UpperCamelCase , __UpperCamelCase )
                if old_attribute == "":
                    __A = old_model
                else:
                    if not hasattr(__UpperCamelCase , __UpperCamelCase ):
                        raise ValueError(f'{old_model} does not have {old_attribute}' )
                    __A = getattr(__UpperCamelCase , __UpperCamelCase )
        if not is_key_init:
            raise ValueError(f'{key} was not correctly initialized!' )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    prophet.save_pretrained(__UpperCamelCase )


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    lowercase_ = parser.parse_args()
    # NOTE(review): obfuscation renamed the function above to `lowerCAmelCase`
    # but left this call to the original name — NameError at runtime as-is.
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
266
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. 
According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... 
\'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ), } ), ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[List[List[str]]], _lowerCamelCase : List[List[str]], _lowerCamelCase : int = 1, _lowerCamelCase : int = 4, ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCamelCase, hypotheses=_lowerCamelCase, min_len=_lowerCamelCase, max_len=_lowerCamelCase ) }
266
1
"""simple docstring"""
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor

# Standard 256x256 preprocessing: resize, to-tensor, normalise to [-1, 1].
lowercase_ = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def lowerCAmelCase ( __UpperCamelCase ):
    '''Normalise an image / list of PIL images into a batched tensor.

    NOTE(review): obfuscated — reads of `image` and `trans` refer to the
    original parameter name and the Compose above.
    '''
    if isinstance(__UpperCamelCase , torch.Tensor ):
        return image
    elif isinstance(__UpperCamelCase , PIL.Image.Image ):
        __A = [image]
    __A = [trans(img.convert('''RGB''' ) ) for img in image]
    __A = torch.stack(__UpperCamelCase )
    return image


# NOTE(review): the base class name was mangled to `_lowerCAmelCase`
# (presumably DiffusionPipeline — confirm against the upstream example),
# and all methods were renamed `_SCREAMING_SNAKE_CASE` even though the
# bodies call them as `self.check_inputs` / `self.get_timesteps` /
# `self.prepare_latents`.  Tokens preserved; comments annotate intent.
class snake_case ( _lowerCAmelCase ):
    '''DDIM-based image-to-image pipeline: noise an init image to an
    intermediate timestep, then denoise with a UNet.'''

    def __init__( self : Tuple, _lowerCamelCase : Dict, _lowerCamelCase : Union[str, Any] ):
        '''Originally (unet, scheduler).'''
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        __A = DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=_lowerCamelCase, scheduler=_lowerCamelCase )

    def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : Dict ):
        '''Validate that `strength` lies in [0, 1] (originally check_inputs).'''
        if strength < 0 or strength > 1:
            raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}' )

    def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Any, _lowerCamelCase : Dict, _lowerCamelCase : Optional[Any] ):
        '''Truncate the schedule to the last `strength` fraction of timesteps
        (originally get_timesteps(num_inference_steps, strength, device)).'''
        # get the original timestep using init_timestep
        __A = min(int(num_inference_steps * strength ), _lowerCamelCase )
        __A = max(num_inference_steps - init_timestep, 0 )
        __A = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Dict, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Dict, _lowerCamelCase : str, _lowerCamelCase : str, _lowerCamelCase : List[str]=None ):
        '''Move the init image to device/dtype and add scheduler noise at the
        latent timestep (originally prepare_latents).'''
        if not isinstance(_lowerCamelCase, (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowerCamelCase )}' )

        __A = image.to(device=_lowerCamelCase, dtype=_lowerCamelCase )

        if isinstance(_lowerCamelCase, _lowerCamelCase ) and len(_lowerCamelCase ) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(_lowerCamelCase )}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )

        __A = init_latents.shape
        __A = randn_tensor(_lowerCamelCase, generator=_lowerCamelCase, device=_lowerCamelCase, dtype=_lowerCamelCase )

        # get latents
        print('''add noise to latents at timestep''', _lowerCamelCase )
        __A = self.scheduler.add_noise(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
        __A = init_latents

        return latents

    @torch.no_grad()
    def __call__( self : Dict, _lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] = None, _lowerCamelCase : float = 0.8, _lowerCamelCase : int = 1, _lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None, _lowerCamelCase : float = 0.0, _lowerCamelCase : int = 50, _lowerCamelCase : Optional[bool] = None, _lowerCamelCase : Optional[str] = "pil", _lowerCamelCase : bool = True, ):
        '''Run img2img: preprocess, noise to an intermediate timestep, denoise.

        Original signature (per defaults): (image, strength=0.8, batch_size=1,
        generator=None, eta=0.0, num_inference_steps=50,
        use_clipped_model_output=None, output_type="pil", return_dict=True).
        '''
        self.check_inputs(_lowerCamelCase )

        # 2. Preprocess image
        __A = preprocess(_lowerCamelCase )

        # 3. set timesteps
        self.scheduler.set_timesteps(_lowerCamelCase, device=self.device )
        __A , __A = self.get_timesteps(_lowerCamelCase, _lowerCamelCase, self.device )
        __A = timesteps[:1].repeat(_lowerCamelCase )

        # 4. Prepare latent variables
        __A = self.prepare_latents(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, self.unet.dtype, self.device, _lowerCamelCase )
        __A = latents

        # 5. Denoising loop
        for t in self.progress_bar(_lowerCamelCase ):
            # 1. predict noise model_output
            __A = self.unet(_lowerCamelCase, _lowerCamelCase ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            __A = self.scheduler.step(
                _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, eta=_lowerCamelCase, use_clipped_model_output=_lowerCamelCase, generator=_lowerCamelCase, ).prev_sample

        # Map latents from [-1, 1] back to [0, 1] and to channels-last numpy.
        __A = (image / 2 + 0.5).clamp(0, 1 )
        __A = image.cpu().permute(0, 2, 3, 1 ).numpy()

        if output_type == "pil":
            __A = self.numpy_to_pil(_lowerCamelCase )

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=_lowerCamelCase )
266
"""Prefix-sum index supporting O(1) range sums and subarray-sum lookup."""


class snake_case:
    """Immutable prefix-sum index over a list of integers.

    Fixes applied to the obfuscated original: assignment targets (all rewritten
    to `__A`, clobbering each other) are restored from the surviving reads
    (`len_array`, `self.prefix_sum`, `sums`), and the two methods — both mangled
    to `_SCREAMING_SNAKE_CASE`, so the second silently shadowed the first — get
    distinct names again.
    """

    def __init__(self, array: list[int]) -> None:
        """Precompute prefix sums so prefix_sum[i] == sum(array[:i + 1]).

        Handles the empty array (prefix_sum stays []).
        """
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return sum(array[start:end + 1]) in O(1).

        `start` and `end` are inclusive indices into the original array.
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True iff some contiguous subarray sums to `target_sum`.

        Uses the classic hash-set scan over prefix sums: a subarray summing to
        t exists iff prefix_sum[j] - t equals some earlier prefix sum (or 0).
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
266
1
"""Compute Proth numbers (OEIS A080075: 3, 5, 9, 13, 17, 25, ...)."""
import math


def lowerCAmelCase(number: int) -> int:
    """Return the `number`-th Proth number (1-indexed).

    Proth numbers are k * 2**n + 1 with odd k < 2**n; the sequence starts
    3, 5, 9, 13, 17, 25.  Each "block" n contributes a run of values obtained
    by adding 2**(n + 1) to earlier terms.

    Raises:
        TypeError: if `number` is not an int.
        ValueError: if `number` < 1.

    Fixes applied to the obfuscated original: the four locals were all
    rewritten to `__A` (clobbering each other); they are restored from the
    surviving reads (`block_index`, `proth_list`, `proth_index`, `increment`).
    """
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        msg = f'Input value of [number={number}] must be > 0'
        raise ValueError(msg)
    if number == 1:
        return 3
    if number == 2:
        return 5

    # Enough blocks to cover index `number`; each block doubles the run length.
    block_index = int(math.log(number // 3, 2)) + 2

    proth_list = [3, 5]
    proth_index = 2
    increment = 3  # length of the first generated run
    for block in range(1, block_index):
        for _ in range(increment):
            proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
            proth_index += 1
        increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            # Fix: the obfuscated original called the undefined name `proth`.
            value = lowerCAmelCase(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
266
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowercase_ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowercase_ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase ( ): """simple docstring""" __A = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __A = bs[:] __A = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCamelCase ) cs.append(2**8 + n ) n += 1 __A = [chr(__UpperCamelCase ) for n in cs] return dict(zip(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = set() __A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __A = char return pairs class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = VOCAB_FILES_NAMES A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : Dict="replace", _lowerCamelCase : 
Any="<s>", _lowerCamelCase : Optional[int]="</s>", _lowerCamelCase : Dict="</s>", _lowerCamelCase : List[Any]="<s>", _lowerCamelCase : List[str]="<unk>", _lowerCamelCase : str="<pad>", _lowerCamelCase : Any="<mask>", _lowerCamelCase : Any=False, **_lowerCamelCase : Tuple, ): '''simple docstring''' __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else bos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else eos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else sep_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else cls_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token super().__init__( errors=_lowerCamelCase, bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, add_prefix_space=_lowerCamelCase, **_lowerCamelCase, ) with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) __A = {v: k for k, v in self.encoder.items()} __A = errors # how to handle errors in decoding __A = bytes_to_unicode() __A = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCamelCase, encoding='''utf-8''' ) as merges_handle: __A = merges_handle.read().split('''\n''' )[1:-1] __A = [tuple(merge.split() ) for merge in bpe_merges] __A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) ) __A = {} __A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder, **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] __A = tuple(_lowerCamelCase ) __A = get_pairs(_lowerCamelCase ) if not pairs: return token while True: __A = min(_lowerCamelCase, key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __A , 
__A = bigram __A = [] __A = 0 while i < len(_lowerCamelCase ): try: __A = word.index(_lowerCamelCase, _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __A = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __A = tuple(_lowerCamelCase ) __A = new_word if len(_lowerCamelCase ) == 1: break else: __A = get_pairs(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = word return word def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Dict ): '''simple docstring''' __A = [] for token in re.findall(self.pat, _lowerCamelCase ): __A = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) ) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' return self.encoder.get(_lowerCamelCase, self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ): '''simple docstring''' return self.decoder.get(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = ''''''.join(_lowerCamelCase ) __A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors ) return text def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join( _lowerCamelCase, 
(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowerCamelCase, ensure_ascii=_lowerCamelCase ) + '''\n''' ) __A = 0 with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ''' Please check that the tokenizer is not corrupted!''' ) __A = token_index writer.write(''' '''.join(_lowerCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str]=False, **_lowerCamelCase : List[Any] ): '''simple docstring''' __A = kwargs.pop('''add_prefix_space''', self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()): __A = ''' ''' + text 
return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ): '''simple docstring''' __A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __A = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
266
1
"""Prefix-sum index over an integer array with O(1) range-sum queries."""


class PrefixSum:
    """Precomputed prefix sums of an integer array.

    Defect fixed: in the obfuscated original both methods were named
    ``_SCREAMING_SNAKE_CASE``, so the second definition shadowed the first
    and the range-sum query was unreachable; the canonical method names
    (``get_sum``, ``contains_sum``) are restored.
    """

    def __init__(self, array: list[int]) -> None:
        """Build the prefix-sum table in O(n); an empty array is allowed."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of ``array[start..end]`` (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True iff some contiguous subarray sums to ``target_sum``.

        Classic set-of-prefixes trick: subarray (i, j] sums to the target
        iff ``prefix[j] - target_sum`` equals an earlier prefix sum
        (0 stands in for the empty prefix).
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
266
"""Utilities to fetch and load community ("dynamic") diffusers pipeline modules.

Defects fixed from the obfuscated dump: every function was named
``lowerCAmelCase`` (later defs shadowed earlier ones) with all parameters
named ``__UpperCamelCase`` (duplicate parameter names are a SyntaxError),
while call sites and f-strings still referenced the canonical names
(``get_relative_imports``, ``logger``, ``COMMUNITY_PIPELINES_URL``,
``revision``, ``module_file``, ...).  Canonical names are restored so the
module is importable and internally consistent again, and the regex
patterns are raw strings (``'\\s'`` in a plain string is an invalid escape).
"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


# URL template for community pipelines hosted in the diffusers GitHub repository.
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """Return all diffusers releases published on PyPI, sorted oldest-first."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda v: version.Version(v))


def init_hf_modules():
    """Create HF_MODULES_CACHE as an importable package and add it to sys.path (idempotent)."""
    # Already initialized if the cache dir is on the path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create the (possibly nested) package directory ``name`` under HF_MODULES_CACHE."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Return the module names a file relative-imports (``import .x`` / ``from .x import y``)."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Transitively collect every file that ``module_file`` relative-imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Check every absolute import of ``filename`` is installed; return its relative imports.

    Raises:
        ImportError: if any top-level imported package is missing from the environment.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import ``module_path`` (path under the dynamic-modules cache) and return ``class_name`` from it."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the single user-defined DiffusionPipeline subclass in ``loaded_module``.

    Raises:
        ValueError: if more than one candidate pipeline class is found.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Resolve ``module_file`` (local path, community pipeline on GitHub, or Hub repo),
    copy it together with its relative imports into the dynamic-modules cache, and
    return its cache-relative path.
    """
    # Download and cache module_file from the repo `pretrained_model_name_or_path`,
    # or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Fetch a module via :func:`get_cached_module_file` and return ``class_name`` from it
    (or the unique DiffusionPipeline subclass when ``class_name`` is None).
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
266
1
"""Singly linked circular list (tail.next always points back at head).

Defects fixed from the obfuscated dump: both classes were named
``snake_case`` (the second clobbered the first and then referenced a
nonexistent ``Node``), every method was named ``_SCREAMING_SNAKE_CASE``
(later definitions shadowed earlier ones), chained assignments had
collapsed to ``__A = __A = new_node`` (targets lost), and the test
function called methods/classes that no longer existed.  The canonical
implementation is reconstructed.
"""
from __future__ import annotations

from typing import Any


class Node:
    """One list cell holding ``data`` and a pointer to the next cell."""

    def __init__(self, data: Any):
        self.data = data
        self.next = None


class CircularLinkedList:
    """Circular singly linked list keeping both ``head`` and ``tail`` pointers."""

    def __init__(self) -> None:
        self.head = None
        self.tail = None

    def __iter__(self):
        """Yield each element once, starting at head (stops after one full loop)."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` at position ``index`` (0..len inclusive).

        Raises:
            IndexError: if ``index`` is out of range.
        """
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """Remove and return the first element."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the last element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the element at position ``index``.

        Raises:
            IndexError: if ``index`` is out of range (including empty list).
        """
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        """Return True when the list holds no elements."""
        return len(self) == 0


def test_circular_linked_list() -> None:
    """Exercise the full insert/delete/repr API, including error paths."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
266
"""Smoke tests for `PyTorchBenchmark` / `PyTorchBenchmarkArguments`.

NOTE(review): this file is an obfuscated dump.  Every test method is named
``_SCREAMING_SNAKE_CASE`` (later definitions shadow earlier ones, and since
none start with ``test`` unittest discovers nothing), locals are collapsed
to ``__A`` and many references (``results``, ``benchmark``, ``config``,
``MODEL_ID``, ``_lowerCamelCase``, the ``fpaa`` kwarg — presumably ``fp16``)
point at names never defined here.  Code is left byte-for-byte; only
comments/docstrings were added — confirm against the upstream transformers
test file before trusting any behavior described below.
"""
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class snake_case(unittest.TestCase):
    """Runs tiny checkpoints through inference/training benchmarks and checks results exist."""

    def _SCREAMING_SNAKE_CASE(self: Any, _lowerCamelCase: Optional[int]):
        """Walk a benchmark result dict and assert each (batch_size, seq_len) cell exists."""
        # NOTE(review): reads a global `results`; the parameter looks like the
        # intended argument before obfuscation — verify upstream.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss''']):
                __A = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(_lowerCamelCase)

    def _SCREAMING_SNAKE_CASE(self: Optional[Any]):
        """Inference benchmark on a tiny GPT-2 without explicit configs."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=_lowerCamelCase,
            inference=_lowerCamelCase,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=_lowerCamelCase,
        )
        __A = PyTorchBenchmark(_lowerCamelCase)
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def _SCREAMING_SNAKE_CASE(self: Optional[Any]):
        """Inference benchmark using only the pretrained model (no head)."""
        __A = '''sgugger/tiny-distilbert-classification'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=_lowerCamelCase,
            inference=_lowerCamelCase,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=_lowerCamelCase,
            only_pretrain_model=_lowerCamelCase,
        )
        __A = PyTorchBenchmark(_lowerCamelCase)
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def _SCREAMING_SNAKE_CASE(self: Any):
        """Inference benchmark with torchscript enabled."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=_lowerCamelCase,
            inference=_lowerCamelCase,
            torchscript=_lowerCamelCase,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=_lowerCamelCase,
        )
        __A = PyTorchBenchmark(_lowerCamelCase)
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''')
    def _SCREAMING_SNAKE_CASE(self: Optional[Any]):
        """Half-precision inference benchmark (GPU only)."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=_lowerCamelCase,
            inference=_lowerCamelCase,
            fpaa=_lowerCamelCase,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=_lowerCamelCase,
        )
        __A = PyTorchBenchmark(_lowerCamelCase)
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def _SCREAMING_SNAKE_CASE(self: Dict):
        """Inference benchmark when the config's `architectures` is None."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase)
        # set architectures equal to `None`
        __A = None
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=_lowerCamelCase,
            inference=_lowerCamelCase,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=_lowerCamelCase,
        )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config])
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def _SCREAMING_SNAKE_CASE(self: int):
        """Training benchmark on a tiny GPT-2 without explicit configs."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=_lowerCamelCase,
            inference=_lowerCamelCase,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=_lowerCamelCase,
        )
        __A = PyTorchBenchmark(_lowerCamelCase)
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''')
    def _SCREAMING_SNAKE_CASE(self: Optional[int]):
        """Half-precision training benchmark (GPU only)."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=_lowerCamelCase,
            inference=_lowerCamelCase,
            sequence_lengths=[8],
            batch_sizes=[1],
            fpaa=_lowerCamelCase,
            multi_process=_lowerCamelCase,
        )
        __A = PyTorchBenchmark(_lowerCamelCase)
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def _SCREAMING_SNAKE_CASE(self: str):
        """Inference benchmark passing explicit configs."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase)
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=_lowerCamelCase,
            inference=_lowerCamelCase,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=_lowerCamelCase,
        )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config])
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def _SCREAMING_SNAKE_CASE(self: List[Any]):
        """Inference benchmark on a tiny encoder-decoder (BART) with explicit configs."""
        __A = '''sshleifer/tinier_bart'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase)
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=_lowerCamelCase,
            inference=_lowerCamelCase,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=_lowerCamelCase,
        )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config])
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def _SCREAMING_SNAKE_CASE(self: Optional[Any]):
        """Training benchmark passing explicit configs."""
        __A = '''sshleifer/tiny-gpt2'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase)
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=_lowerCamelCase,
            inference=_lowerCamelCase,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=_lowerCamelCase,
        )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config])
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def _SCREAMING_SNAKE_CASE(self: Dict):
        """Training benchmark on a tiny encoder-decoder (BART) with explicit configs."""
        __A = '''sshleifer/tinier_bart'''
        __A = AutoConfig.from_pretrained(_lowerCamelCase)
        __A = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=_lowerCamelCase,
            inference=_lowerCamelCase,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=_lowerCamelCase,
        )
        __A = PyTorchBenchmark(_lowerCamelCase, configs=[config])
        __A = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def _SCREAMING_SNAKE_CASE(self: Optional[Any]):
        """Benchmark with CSV export: assert every CSV artifact is written."""
        __A = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            __A = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=_lowerCamelCase,
                inference=_lowerCamelCase,
                save_to_csv=_lowerCamelCase,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv'''),
                train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv'''),
                inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv'''),
                train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv'''),
                env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv'''),
                multi_process=_lowerCamelCase,
            )
            __A = PyTorchBenchmark(_lowerCamelCase)
            benchmark.run()
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''')).exists())
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''')).exists())
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''')).exists())
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''')).exists())
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''')).exists())

    def _SCREAMING_SNAKE_CASE(self: Union[str, Any]):
        """Line-by-line memory tracing: assert summaries and the log file are produced."""
        __A = '''sshleifer/tiny-gpt2'''

        def _check_summary_is_not_empty(_lowerCamelCase: List[Any]):
            # A memory summary must expose all four aggregate views.
            self.assertTrue(hasattr(_lowerCamelCase, '''sequential'''))
            self.assertTrue(hasattr(_lowerCamelCase, '''cumulative'''))
            self.assertTrue(hasattr(_lowerCamelCase, '''current'''))
            self.assertTrue(hasattr(_lowerCamelCase, '''total'''))

        with tempfile.TemporaryDirectory() as tmp_dir:
            __A = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=_lowerCamelCase,
                inference=_lowerCamelCase,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(_lowerCamelCase, '''log.txt'''),
                log_print=_lowerCamelCase,
                trace_memory_line_by_line=_lowerCamelCase,
                multi_process=_lowerCamelCase,
            )
            __A = PyTorchBenchmark(_lowerCamelCase)
            __A = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''')).exists())
266
1
"""Graph m-coloring via backtracking on an adjacency matrix.

Defects fixed from the obfuscated dump: all three functions were named
``lowerCAmelCase`` (each definition shadowed the previous) while their
bodies called ``valid_coloring`` and ``util_color`` — names that no longer
existed — and every parameter was the duplicated name ``__UpperCamelCase``,
which is a SyntaxError.  Canonical names are restored.
"""


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True iff no already-colored neighbour uses ``color``.

    ``neighbours`` is one adjacency-matrix row (1 = edge present);
    uncolored vertices hold -1 in ``colored_vertices``.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertices ``index..n-1``; mutate ``colored_vertices`` in place.

    Returns True on success, False (with the partial coloring backtracked) otherwise.
    """
    # Base Case: every vertex has been assigned a color.
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring of ``graph`` using at most ``max_colors`` colors, or [] if impossible."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
266
"""Tokenization tests for Pegasus and BigBird-Pegasus tokenizers.

NOTE(review): this file is an obfuscated dump.  Both test classes are named
``snake_case`` and inherit from an undefined ``_lowerCAmelCase`` (originally,
presumably, ``TokenizerTesterMixin``); every test method is named
``_SCREAMING_SNAKE_CASE`` (later definitions shadow earlier ones, and none
start with ``test`` so unittest discovers nothing); class attributes all
reassign ``A_``; locals collapse to ``__A`` and many references
(``tokenizer``, ``vocab_keys``, ``batch``, ``targets``, ``_lowerCamelCase``)
are never defined.  Code is kept byte-for-byte; only comments/docstrings
were added — reconcile with the upstream transformers test file.
"""
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# SentencePiece fixture without a BOS token, shared by both test classes.
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')


@require_sentencepiece
@require_tokenizers
class snake_case(_lowerCAmelCase, unittest.TestCase):
    """Tests for the standard google/pegasus-large tokenizer."""

    # NOTE(review): all four attributes bind the same name `A_`; only the last wins.
    A_ : Optional[Any] = PegasusTokenizer
    A_ : int = PegasusTokenizerFast
    A_ : Optional[Any] = True
    A_ : Union[str, Any] = True

    def _SCREAMING_SNAKE_CASE(self: Union[str, Any]):
        """Save a fixture-based tokenizer into the temp dir for the mixin to load."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        __A = PegasusTokenizer(_lowerCamelCase)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _SCREAMING_SNAKE_CASE(self: Tuple):
        """The full pretrained tokenizer (network download, hence cached)."""
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''')

    def _SCREAMING_SNAKE_CASE(self: int, **_lowerCamelCase: List[Any]):
        """Reload the fixture tokenizer from the temp dir."""
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase)

    def _SCREAMING_SNAKE_CASE(self: Optional[Any], _lowerCamelCase: Dict):
        """Input/output text pair used by the common tokenizer tests."""
        return ("This is a test", "This is a test")

    def _SCREAMING_SNAKE_CASE(self: Tuple):
        """Round-trip a single token through id conversion."""
        __A = '''</s>'''
        __A = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase), _lowerCamelCase)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase), _lowerCamelCase)

    def _SCREAMING_SNAKE_CASE(self: int):
        """Spot-check the vocab ordering and size."""
        __A = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '''<pad>''')
        self.assertEqual(vocab_keys[1], '''</s>''')
        self.assertEqual(vocab_keys[-1], '''v''')
        self.assertEqual(len(_lowerCamelCase), 11_03)

    def _SCREAMING_SNAKE_CASE(self: int):
        """Fixture vocab size is 1103."""
        self.assertEqual(self.get_tokenizer().vocab_size, 11_03)

    def _SCREAMING_SNAKE_CASE(self: Dict):
        """Slow and fast tokenizers must agree on unknown mask-like tokens."""
        __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        __A = self.tokenizer_class.from_pretrained(self.tmpdirname)
        __A = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase).input_ids[0]
        __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase).input_ids[0]
        self.assertListEqual(_lowerCamelCase, _lowerCamelCase)

    def _SCREAMING_SNAKE_CASE(self: List[Any]):
        """Mask tokens must map to their reserved ids."""
        __A = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        __A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        __A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase).input_ids[0]
        self.assertListEqual(_lowerCamelCase, _lowerCamelCase)

    def _SCREAMING_SNAKE_CASE(self: Optional[int]):
        """Check the pretrained tokenizer's static settings and a known encoding."""
        __A = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_61_03
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 1_03
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 10_24
        __A = '''To ensure a smooth flow of bank resolutions.'''
        __A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase).input_ids[0]
        self.assertListEqual(_lowerCamelCase, _lowerCamelCase)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def _SCREAMING_SNAKE_CASE(self: Dict):
        """Seq2seq batching: inputs padded/truncated to model_max_length (1024)."""
        __A = ['''This is going to be way too long.''' * 1_50, '''short example''']
        __A = ['''not super long but more than 5 tokens''', '''tiny''']
        __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''')
        __A = self._large_tokenizer(
            text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''')

        assert batch.input_ids.shape == (2, 10_24)
        assert batch.attention_mask.shape == (2, 10_24)
        assert targets["input_ids"].shape == (2, 5)
        assert len(_lowerCamelCase) == 2  # input_ids, attention_mask.

    @slow
    def _SCREAMING_SNAKE_CASE(self: Tuple):
        """Integration test pinning a full expected encoding for a fixed revision."""
        # fmt: off
        __A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCamelCase,
            model_name='''google/bigbird-pegasus-large-arxiv''',
            revision='''ba85d0851d708441f91440d509690f1ab6353415''',
        )


@require_sentencepiece
@require_tokenizers
class snake_case(_lowerCAmelCase, unittest.TestCase):
    """Tests for the BigBird-Pegasus tokenizer variant (offset=0, [MASK] token).

    NOTE(review): same class name as above — this definition replaces the
    previous class at module level; an obfuscation artifact.
    """

    A_ : str = PegasusTokenizer
    A_ : Union[str, Any] = PegasusTokenizerFast
    A_ : Any = True
    A_ : str = True

    def _SCREAMING_SNAKE_CASE(self: List[Any]):
        """Save a fixture tokenizer configured like BigBird-Pegasus."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        __A = PegasusTokenizer(_lowerCamelCase, offset=0, mask_token_sent=_lowerCamelCase, mask_token='''[MASK]''')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _SCREAMING_SNAKE_CASE(self: str):
        """The full pretrained BigBird-Pegasus tokenizer."""
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''')

    def _SCREAMING_SNAKE_CASE(self: Optional[int], **_lowerCamelCase: Dict):
        """Reload the fixture tokenizer from the temp dir."""
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase)

    def _SCREAMING_SNAKE_CASE(self: List[str], _lowerCamelCase: List[str]):
        """Input/output text pair used by the common tokenizer tests."""
        return ("This is a test", "This is a test")

    def _SCREAMING_SNAKE_CASE(self: List[Any]):
        """Slow and fast tokenizers must agree on unknown mask-like tokens."""
        __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        __A = self.tokenizer_class.from_pretrained(self.tmpdirname)
        __A = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase).input_ids[0]
        __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase).input_ids[0]
        self.assertListEqual(_lowerCamelCase, _lowerCamelCase)

    @require_torch
    def _SCREAMING_SNAKE_CASE(self: Tuple):
        """Seq2seq batching: inputs padded/truncated to model_max_length (4096)."""
        __A = ['''This is going to be way too long.''' * 10_00, '''short example''']
        __A = ['''not super long but more than 5 tokens''', '''tiny''']
        __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''')
        __A = self._large_tokenizer(
            text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''')

        assert batch.input_ids.shape == (2, 40_96)
        assert batch.attention_mask.shape == (2, 40_96)
        assert targets["input_ids"].shape == (2, 5)
        assert len(_lowerCamelCase) == 2  # input_ids, attention_mask.

    def _SCREAMING_SNAKE_CASE(self: Tuple):
        """Pin the encoding of a reference sentence against the TF implementation."""
        __A = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        __A = self._large_tokenizer(_lowerCamelCase).input_ids
        self.assertListEqual(
            _lowerCamelCase,
            [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1],
        )
266
1
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable lowercase_ = { 'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'], 'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoXJapaneseForCausalLM', 'GPTNeoXJapaneseLayer', 'GPTNeoXJapaneseModel', 'GPTNeoXJapanesePreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
266
"""simple docstring""" import re def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )] def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = split_input(str_ ) return "".join( [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" try: __A = split_input(__UpperCamelCase ) if upper: __A = ''''''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __A = ''''''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return to_simple_case(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" try: __A = to_simple_case(__UpperCamelCase ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''_''' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''-''' ) if __name__ == "__main__": __import__('doctest').testmod()
266
1
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = LayoutLMTokenizer A_ : Dict = LayoutLMTokenizerFast A_ : List[str] = True A_ : Optional[int] = True def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' super().setUp() __A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Any, **_lowerCamelCase : Optional[int] ): '''simple docstring''' return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : List[str] ): '''simple docstring''' __A = '''UNwant\u00E9d,running''' __A = '''unwanted, running''' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = self.tokenizer_class(self.vocab_file ) __A = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_lowerCamelCase, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ), [7, 4, 5, 10, 8, 9] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' pass
266
"""simple docstring""" from __future__ import annotations class snake_case : '''simple docstring''' def __init__( self : int, _lowerCamelCase : List[Any]=None ): '''simple docstring''' __A = data __A = None def __repr__( self : Union[str, Any] ): '''simple docstring''' __A = [] __A = self while temp: string_rep.append(f'{temp.data}' ) __A = temp.next return "->".join(_lowerCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not elements_list: raise Exception('''The Elements List is empty''' ) __A = __A = Node(elements_list[0] ) for i in range(1 , len(__UpperCamelCase ) ): __A = Node(elements_list[i] ) __A = current.next return head def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def lowerCAmelCase ( ): """simple docstring""" from doctest import testmod testmod() __A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print('''Linked List:''' ) print(__UpperCamelCase ) print('''Elements in Reverse:''' ) print_reverse(__UpperCamelCase ) if __name__ == "__main__": main()
266
1
"""simple docstring""" import argparse import struct import unittest class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : bytes ): '''simple docstring''' __A = data # Initialize hash values __A = [ 0X6a_09e_667, 0Xbb_67a_e85, 0X3c_6ef_372, 0Xa5_4ff_53a, 0X51_0e5_27f, 0X9b_056_88c, 0X1f_83d_9ab, 0X5b_e0c_d19, ] # Initialize round constants __A = [ 0X42_8a2_f98, 0X71_374_491, 0Xb5_c0f_bcf, 0Xe9_b5d_ba5, 0X39_56c_25b, 0X59_f11_1f1, 0X92_3f8_2a4, 0Xab_1c5_ed5, 0Xd8_07a_a98, 0X12_835_b01, 0X24_318_5be, 0X55_0c7_dc3, 0X72_be5_d74, 0X80_deb_1fe, 0X9b_dc0_6a7, 0Xc1_9bf_174, 0Xe4_9b6_9c1, 0Xef_be4_786, 0X0f_c19_dc6, 0X24_0ca_1cc, 0X2d_e92_c6f, 0X4a_748_4aa, 0X5c_b0a_9dc, 0X76_f98_8da, 0X98_3e5_152, 0Xa8_31c_66d, 0Xb0_032_7c8, 0Xbf_597_fc7, 0Xc6_e00_bf3, 0Xd5_a79_147, 0X06_ca6_351, 0X14_292_967, 0X27_b70_a85, 0X2e_1b2_138, 0X4d_2c6_dfc, 0X53_380_d13, 0X65_0a7_354, 0X76_6a0_abb, 0X81_c2c_92e, 0X92_722_c85, 0Xa2_bfe_8a1, 0Xa8_1a6_64b, 0Xc2_4b8_b70, 0Xc7_6c5_1a3, 0Xd1_92e_819, 0Xd6_990_624, 0Xf4_0e3_585, 0X10_6aa_070, 0X19_a4c_116, 0X1e_376_c08, 0X27_487_74c, 0X34_b0b_cb5, 0X39_1c0_cb3, 0X4e_d8a_a4a, 0X5b_9cc_a4f, 0X68_2e6_ff3, 0X74_8f8_2ee, 0X78_a56_36f, 0X84_c87_814, 0X8c_c70_208, 0X90_bef_ffa, 0Xa4_506_ceb, 0Xbe_f9a_3f7, 0Xc6_717_8f2, ] __A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : bytes ): '''simple docstring''' __A = b'''\x80''' + (b'''\x00''' * (63 - (len(_lowerCamelCase ) + 8) % 64)) __A = struct.pack('''>Q''', (len(_lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' # Convert into blocks of 64 bytes __A = [ self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data ), 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __A = list(struct.unpack('''>16L''', _lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __A , 
__A , __A , __A , __A , __A , __A , __A = self.hashes for index in range(0, 64 ): if index > 15: # modify the zero-ed indexes at the end of the array __A = ( self.ror(words[index - 15], 7 ) ^ self.ror(words[index - 15], 18 ) ^ (words[index - 15] >> 3) ) __A = ( self.ror(words[index - 2], 17 ) ^ self.ror(words[index - 2], 19 ) ^ (words[index - 2] >> 10) ) __A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression __A = self.ror(_lowerCamelCase, 6 ) ^ self.ror(_lowerCamelCase, 11 ) ^ self.ror(_lowerCamelCase, 25 ) __A = (e & f) ^ ((~e & 0Xff_fff_fff) & g) __A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 __A = self.ror(_lowerCamelCase, 2 ) ^ self.ror(_lowerCamelCase, 13 ) ^ self.ror(_lowerCamelCase, 22 ) __A = (a & b) ^ (a & c) ^ (b & c) __A = (sa + maj) % 0X100_000_000 __A , __A , __A , __A , __A , __A , __A , __A = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) __A = [a, b, c, d, e, f, g, h] # Modify final values __A = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes ) ] __A = ''''''.join([hex(_lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' return 0Xff_fff_fff & (value << (32 - rotations)) | (value >> rotations) class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' import hashlib __A = bytes('''Test String''', '''utf-8''' ) self.assertEqual(SHAaaa(_lowerCamelCase ).hash, hashlib.shaaaa(_lowerCamelCase ).hexdigest() ) def lowerCAmelCase ( ): """simple docstring""" import doctest doctest.testmod() __A = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! 
Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) __A = parser.parse_args() __A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: __A = f.read() else: __A = bytes(__UpperCamelCase , '''utf-8''' ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
266
"""simple docstring""" from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : int = ["input_features", "attention_mask"] def __init__( self : Optional[Any], _lowerCamelCase : Union[str, Any]=80, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Any=80, _lowerCamelCase : List[str]=0.0, _lowerCamelCase : int=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]=True, **_lowerCamelCase : List[str], ): '''simple docstring''' super().__init__(feature_size=_lowerCamelCase, sampling_rate=_lowerCamelCase, padding_value=_lowerCamelCase, **_lowerCamelCase ) __A = num_mel_bins __A = do_ceptral_normalize __A = normalize_means __A = normalize_vars __A = True def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : np.ndarray, ): '''simple docstring''' __A = waveform * (2**15) # Kaldi compliance: 16-bit signed integers __A = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 ) __A = ta_kaldi.fbank(_lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray, _lowerCamelCase : int, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : float = 0.0, ): '''simple docstring''' # make sure we normalize float32 arrays if normalize_means: __A = x[:input_length].mean(axis=0 ) __A = np.subtract(_lowerCamelCase, _lowerCamelCase ) if normalize_vars: __A = x[:input_length].std(axis=0 ) __A = np.divide(_lowerCamelCase, _lowerCamelCase ) if input_length < x.shape[0]: __A = padding_value # make sure array is in float32 __A = x.astype(np.floataa ) return x def 
_SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[np.ndarray], _lowerCamelCase : Optional[np.ndarray] = None ): '''simple docstring''' __A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_lowerCamelCase, _lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value ) for x, n in zip(_lowerCamelCase, _lowerCamelCase ) ] def __call__( self : Optional[Any], _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _lowerCamelCase : Union[bool, str, PaddingStrategy] = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : bool = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[bool] = None, **_lowerCamelCase : Optional[Any], ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __A = isinstance(_lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __A = is_batched_numpy or ( isinstance(_lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) )) ) if is_batched: __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCamelCase, np.ndarray ): __A = np.asarray(_lowerCamelCase, dtype=np.floataa ) elif isinstance(_lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __A = [raw_speech] # extract fbank features __A = [self._extract_fbank_features(_lowerCamelCase ) for waveform in raw_speech] # convert into correct format for padding __A = BatchFeature({'''input_features''': features} ) __A = self.pad( _lowerCamelCase, padding=_lowerCamelCase, max_length=_lowerCamelCase, truncation=_lowerCamelCase, pad_to_multiple_of=_lowerCamelCase, return_attention_mask=_lowerCamelCase, **_lowerCamelCase, ) # make sure list is in array format __A = padded_inputs.get('''input_features''' ) if isinstance(input_features[0], _lowerCamelCase ): __A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for feature in input_features] __A = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: __A = [np.asarray(_lowerCamelCase, dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __A = ( np.array(_lowerCamelCase, dtype=np.intaa ) if self._get_padding_strategies(_lowerCamelCase, max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __A = self.normalize( padded_inputs['''input_features'''], attention_mask=_lowerCamelCase ) if 
return_tensors is not None: __A = padded_inputs.convert_to_tensors(_lowerCamelCase ) return padded_inputs
266
1
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowercase_ = 16 lowercase_ = 32 def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1_6 , __UpperCamelCase = "bert-base-cased" ): """simple docstring""" __A = AutoTokenizer.from_pretrained(__UpperCamelCase ) __A = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __A = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=__UpperCamelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __A = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
__A = DataLoader( tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase ) __A = DataLoader( tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase ) return train_dataloader, eval_dataloader def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __A = config['''lr'''] __A = int(config['''num_epochs'''] ) __A = int(config['''seed'''] ) __A = int(config['''batch_size'''] ) __A = args.model_name_or_path set_seed(__UpperCamelCase ) __A , __A = get_dataloaders(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __A = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase , return_dict=__UpperCamelCase ) # Instantiate optimizer __A = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __A = optimizer_cls(params=model.parameters() , lr=__UpperCamelCase ) if accelerator.state.deepspeed_plugin is not None: __A = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: __A = 1 __A = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __A = get_linear_schedule_with_warmup( optimizer=__UpperCamelCase , num_warmup_steps=0 , num_training_steps=__UpperCamelCase , ) else: __A = DummyScheduler(__UpperCamelCase , total_num_steps=__UpperCamelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # 
prepare method. __A , __A , __A , __A , __A = accelerator.prepare( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # We need to keep track of how many total steps we have iterated over __A = 0 # We also need to keep track of the stating epoch so files are named properly __A = 0 # Now we train the model __A = evaluate.load('''glue''' , '''mrpc''' ) __A = 0 __A = {} for epoch in range(__UpperCamelCase , __UpperCamelCase ): model.train() for step, batch in enumerate(__UpperCamelCase ): __A = model(**__UpperCamelCase ) __A = outputs.loss __A = loss / gradient_accumulation_steps accelerator.backward(__UpperCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __A = 0 for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __A = model(**__UpperCamelCase ) __A = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __A , __A = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__UpperCamelCase ) - 1: __A = predictions[: len(eval_dataloader.dataset ) - samples_seen] __A = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__UpperCamelCase , references=__UpperCamelCase , ) __A = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(f'epoch {epoch}:' , __UpperCamelCase ) __A = eval_metric['''accuracy'''] if best_performance < eval_metric["accuracy"]: __A = eval_metric['''accuracy'''] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( ): """simple docstring""" __A = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=__UpperCamelCase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__UpperCamelCase , ) parser.add_argument( '''--output_dir''' , type=__UpperCamelCase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--performance_lower_bound''' , type=__UpperCamelCase , default=__UpperCamelCase , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , ) parser.add_argument( '''--num_epochs''' , type=__UpperCamelCase , default=3 , help='''Number of train epochs.''' , ) __A = parser.parse_args() __A = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 4_2, '''batch_size''': 1_6} training_function(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": main()
266
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : Optional[Any], _lowerCamelCase : Union[str, Any]=13, _lowerCamelCase : Any=3, _lowerCamelCase : Optional[int]=2_24, _lowerCamelCase : str=30, _lowerCamelCase : Dict=4_00, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : Any=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Any=[0.5, 0.5, 0.5], _lowerCamelCase : List[str]=[0.5, 0.5, 0.5], ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = do_normalize __A = image_mean __A = image_std def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = ViTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = EfficientFormerImageProcessorTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) 
self.assertTrue(hasattr(_lowerCamelCase, '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''image_std''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = 
image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' # Initialize image_processor __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_proc_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), ) # Test batched __A = image_processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ), )
266
1
"""simple docstring""" lowercase_ = {str(digit): digit**5 for digit in range(10)} def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(__UpperCamelCase ) ) def lowerCAmelCase ( ): """simple docstring""" return sum( number for number in range(1_0_0_0 , 1_0_0_0_0_0_0 ) if number == digits_fifth_powers_sum(__UpperCamelCase ) ) if __name__ == "__main__": print(solution())
266
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor lowercase_ = logging.get_logger(__name__) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int], *_lowerCamelCase : Union[str, Any], **_lowerCamelCase : Dict ): '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''', _lowerCamelCase, ) super().__init__(*_lowerCamelCase, **_lowerCamelCase )
266
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = '▁' lowercase_ = {'vocab_file': 'sentencepiece.bpe.model'} lowercase_ = { 'vocab_file': { 'facebook/mbart-large-50-one-to-many-mmt': ( 'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model' ), } } lowercase_ = { 'facebook/mbart-large-50-one-to-many-mmt': 1024, } # fmt: off lowercase_ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI'] class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = VOCAB_FILES_NAMES A_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Tuple = PRETRAINED_VOCAB_FILES_MAP A_ : Dict = ["input_ids", "attention_mask"] A_ : List[int] = [] A_ : List[int] = [] def __init__( self : Dict, _lowerCamelCase : Any, _lowerCamelCase : List[Any]=None, _lowerCamelCase : List[Any]=None, _lowerCamelCase : Optional[Any]="</s>", _lowerCamelCase : Tuple="</s>", _lowerCamelCase : Dict="<s>", _lowerCamelCase : Optional[Any]="<unk>", _lowerCamelCase : Optional[int]="<pad>", _lowerCamelCase : Optional[int]="<mask>", _lowerCamelCase : Optional[Dict[str, Any]] = None, **_lowerCamelCase : Dict, ): '''simple docstring''' # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token __A = {} if sp_model_kwargs is None else sp_model_kwargs __A = kwargs.get('''additional_special_tokens''', [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_lowerCamelCase, tgt_lang=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **_lowerCamelCase, ) __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCamelCase ) ) __A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __A = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __A = 1 __A = len(self.sp_model ) __A = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowerCamelCase ) } __A = {v: k for k, v in self.lang_code_to_id.items()} __A = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __A = src_lang if src_lang is not None else '''en_XX''' __A = self.lang_code_to_id[self._src_lang] __A = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return self._src_lang @src_lang.setter def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : str ): '''simple docstring''' __A = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Dict ): '''simple docstring''' __A = self.__dict__.copy() __A = None return state def __setstate__( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = d # for backward compatibility if not hasattr(self, '''sp_model_kwargs''' ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : str ): '''simple docstring''' return 
self.sp_model.encode(_lowerCamelCase, out_type=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : str ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __A = self.sp_model.PieceToId(_lowerCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : Optional[Any] ): '''simple docstring''' __A = [] __A = '''''' __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCamelCase ) + token __A = True __A = [] else: current_sub_tokens.append(_lowerCamelCase ) __A = False out_string += self.sp_model.decode(_lowerCamelCase ) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, _lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCamelCase, '''wb''' ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (out_vocab_file,) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[int], 
_lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) __A = [1] * len(self.prefix_tokens ) __A = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : List[Any], _lowerCamelCase : str, _lowerCamelCase : Optional[str], _lowerCamelCase : Optional[str], **_lowerCamelCase : Dict ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __A = src_lang __A = self(_lowerCamelCase, add_special_tokens=_lowerCamelCase, return_tensors=_lowerCamelCase, **_lowerCamelCase ) __A = self.convert_tokens_to_ids(_lowerCamelCase ) __A = tgt_lang_id return inputs def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[str], _lowerCamelCase : str = "en_XX", _lowerCamelCase : Optional[List[str]] = None, _lowerCamelCase : str = "ro_RO", **_lowerCamelCase : Dict, ): '''simple docstring''' __A = src_lang __A = tgt_lang return super().prepare_seqaseq_batch(_lowerCamelCase, _lowerCamelCase, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def 
_SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : str ): '''simple docstring''' __A = self.lang_code_to_id[src_lang] __A = [self.cur_lang_code_id] __A = [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : str ): '''simple docstring''' __A = self.lang_code_to_id[tgt_lang] __A = [self.cur_lang_code_id] __A = [self.eos_token_id]
266
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : List[Any]=7, _lowerCamelCase : int=3, _lowerCamelCase : Optional[Any]=18, _lowerCamelCase : Any=30, _lowerCamelCase : str=4_00, _lowerCamelCase : int=True, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str=True, ): '''simple docstring''' __A = size if size is not None else {'''height''': 18, '''width''': 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = apply_ocr def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = LayoutLMvaImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase, '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''apply_ocr''' ) ) def 
_SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} ) __A = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, Image.Image ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) self.assertIsInstance(encoding.words, _lowerCamelCase ) self.assertIsInstance(encoding.boxes, _lowerCamelCase ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0], 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase, torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) # Test batched __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ), ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' # with apply_OCR = True __A = LayoutLMvaImageProcessor() from datasets import load_dataset __A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' ) __A = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) ) 
self.assertEqual(len(encoding.words ), len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', 
'''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 __A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 
2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, _lowerCamelCase ) self.assertListEqual(encoding.boxes, _lowerCamelCase ) # with apply_OCR = False __A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) __A = image_processing(_lowerCamelCase, return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
266
1
"""Generate hexagonal numbers: h(n) = n * (2n - 1)."""


def lowerCAmelCase(__UpperCamelCase):
    """Return the first ``__UpperCamelCase`` hexagonal numbers, starting at n = 0.

    Fixes the original, which referenced an undefined name ``length`` and called
    ``isinstance`` with the value itself as the type argument.

    Raises:
        ValueError: if the argument is not a positive ``int``.
    """
    # Validate the type BEFORE comparing, so non-numeric input raises the
    # documented ValueError instead of a TypeError from `<=`.
    # bool is an int subclass; reject it explicitly.
    if not isinstance(__UpperCamelCase, int) or isinstance(__UpperCamelCase, bool) or __UpperCamelCase <= 0:
        raise ValueError('''Length must be a positive integer.''')
    return [n * (2 * n - 1) for n in range(__UpperCamelCase)]


if __name__ == "__main__":
    # The original called the undefined name `hexagonal_numbers`.
    print(lowerCAmelCase(5))
    print(lowerCAmelCase(10))
266
# NOTE(review): flattened terminal-cursor utilities (hide / show / context manager).
# Three defs all share the name `lowerCAmelCase`, so the @contextmanager version
# shadows the two implementations above it, and its body calls `hide_cursor()` /
# `show_cursor()`, which are undefined in this chunk — presumably the original
# function names before obfuscation; TODO confirm against the upstream source and
# restore distinct names. The Windows branch also references `CursorInfo`,
# `ctypes.windll.kernelaa` and `__UpperCamelCase`, none of which are defined here
# (the ctypes struct class is named `snake_case`, the DLL is normally `kernel32`,
# and the handle/struct locals were collapsed to `__A`) — non-functional as written.
# The posix branch writes the standard ANSI escapes `\033[?25l` / `\033[?25h`.
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class snake_case ( ctypes.Structure ): '''simple docstring''' A_ : List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = False ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25l''' ) sys.stdout.flush() def lowerCAmelCase ( ): """simple docstring""" if os.name == "nt": __A = CursorInfo() __A = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) __A = True ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) ) elif os.name == "posix": sys.stdout.write('''\033[?25h''' ) sys.stdout.flush() @contextmanager def lowerCAmelCase ( ): """simple docstring""" try: hide_cursor() yield finally: show_cursor()
266
1
"""Project Euler problem 19: Sundays on the first of the month, 1901-2000."""
import datetime


def lowerCAmelCase():
    """Count how many months in 1901-2000 (inclusive) begin on a Sunday.

    Replaces the hand-rolled leap-year/day-of-week arithmetic with the
    standard-library calendar, which cannot drift by an off-by-one.

    Returns:
        int: the number of first-of-months that are Sundays (171).
    """
    # date.weekday(): Monday == 0 ... Sunday == 6.
    return sum(
        1
        for year in range(1_9_0_1, 2_0_0_1)
        for month in range(1, 13)
        if datetime.date(year, month, 1).weekday() == 6
    )


if __name__ == "__main__":
    # The original called the undefined name `solution`.
    print(lowerCAmelCase())
266
"""Pure-Python SHA-256 (FIPS 180-4) with a unittest sanity check and a small CLI."""
import argparse
import struct
import unittest


class snake_case:
    """SHA-256 hasher: ``snake_case(data).hash`` is the hex digest of ``data``.

    Restores the working implementation: the obfuscated original collapsed every
    local to ``__A``, so all multiple-assignments lost their state.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes.
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Round constants: first 32 bits of the fractional parts of the cube
        # roots of the first 64 primes.
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a multiple of 64 bytes: 0x80, zeros, then the 64-bit bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", len(data) * 8)
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over each 64-byte block of the message."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # 16 big-endian 32-bit words plus room for the expanded schedule.
            words = list(struct.unpack(">16L", block)) + [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(64):
                if index > 15:
                    # Lazy message-schedule expansion (computed just before use).
                    schedule_s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    schedule_s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + schedule_s0 + words[index - 7] + schedule_s1
                    ) % 0x100000000
                # Compression round.
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (h + s1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                a, b, c, d, e, f, g, h = (
                    (temp1 + temp2) % 0x100000000,
                    a,
                    b,
                    c,
                    (d + temp1) % 0x100000000,
                    e,
                    f,
                    g,
                )
            # Fold the compressed block into the running hash state.
            self.hashes = [
                (stored + fresh) % 0x100000000
                for stored, fresh in zip(self.hashes, [a, b, c, d, e, f, g, h])
            ]
        self.hash = "".join(hex(value)[2:].zfill(8) for value in self.hashes)

    def ror(self, value: int, rotations: int) -> int:
        """Rotate a 32-bit ``value`` right by ``rotations`` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHATwoFiftySixTest(unittest.TestCase):
    """Sanity-check the pure-Python digest against hashlib.

    Renamed from the obfuscated ``snake_case``, which shadowed the hasher class
    above; it also referenced the undefined name ``SHAaaa``.
    """

    def test_match_hashes(self) -> None:
        import hashlib

        data = bytes("Test String", "utf-8")
        self.assertEqual(snake_case(data).hash, hashlib.sha256(data).hexdigest())


def lowerCAmelCase():
    """CLI entry point: print the SHA-256 of a string or of a file's contents."""
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # Hash input must be a bytestring; file contents are read as binary.
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(snake_case(hash_input).hash)


if __name__ == "__main__":
    # The original called the undefined name `main`.
    lowerCAmelCase()
266
1
# NOTE(review): flattened lazy-import module (`transformers`-style __init__ for
# Autoformer). The obfuscation reused one name `lowercase_` for what were
# originally distinct bindings: the import-structure dict, the torch-only model
# list, and the final `_LazyModule` — so the second assignment clobbers the
# first, and the `_import_structure` name passed to `_LazyModule` is never
# defined here at all (NameError on import) — TODO confirm against upstream and
# restore the distinct names. The TYPE_CHECKING branch is annotation-only and
# mirrors the runtime structure; the torch availability probe deliberately
# swallows OptionalDependencyNotAvailable to keep the package importable
# without torch.
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { 'configuration_autoformer': [ 'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AutoformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ 'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'AutoformerForPrediction', 'AutoformerModel', 'AutoformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
266
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets lowercase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' lowercase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' lowercase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. 
Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ], ) def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str="auto", _lowerCamelCase : Union[str, Any]=-1, _lowerCamelCase : List[str]=0.9, _lowerCamelCase : int=5, _lowerCamelCase : Tuple=5_00, _lowerCamelCase : Union[str, Any]="gpt2-large", _lowerCamelCase : int=-1, _lowerCamelCase : Union[str, Any]=10_24, _lowerCamelCase : Union[str, Any]=25, _lowerCamelCase : 
str=5, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=25, ): '''simple docstring''' __A = compute_mauve( p_text=_lowerCamelCase, q_text=_lowerCamelCase, p_features=_lowerCamelCase, q_features=_lowerCamelCase, p_tokens=_lowerCamelCase, q_tokens=_lowerCamelCase, num_buckets=_lowerCamelCase, pca_max_data=_lowerCamelCase, kmeans_explained_var=_lowerCamelCase, kmeans_num_redo=_lowerCamelCase, kmeans_max_iter=_lowerCamelCase, featurize_model_name=_lowerCamelCase, device_id=_lowerCamelCase, max_text_length=_lowerCamelCase, divergence_curve_discretization_size=_lowerCamelCase, mauve_scaling_factor=_lowerCamelCase, verbose=_lowerCamelCase, seed=_lowerCamelCase, ) return out
266
1
"""Utilities for downloading, caching and dynamically importing community pipeline modules."""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """Return every `diffusers` release published on PyPI, sorted oldest-first."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """Create the HF dynamic-modules cache as an importable package and put it on `sys.path`."""
    # This function has already run if HF_MODULES_CACHE is on the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create (recursively) a sub-package named `name` inside the dynamic-modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Return the modules a file imports relatively (`import .x` / `from .x import y`)."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx` (raw strings so \s / \S are real regex escapes)
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Transitively collect every file relatively imported by `module_file` (and its imports)."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports until no new file shows up.
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Check that all top-level imports of a file are installed; return its relative imports."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import `module_path` and return `class_name` from it (auto-discovered when `None`)."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the single `DiffusionPipeline` subclass defined in `loaded_module`.

    Raises:
        ValueError: if more than one candidate subclass is found.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Download (or locate) `module_file`, copy it into the dynamic-modules cache and return its path.

    The file may come from a local folder, from the diffusers GitHub community folder
    (when `pretrained_model_name_or_path` has no `/`), or from a repo on the Hub.
    """
    # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            pass  # use the branch name as-is
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,  # community files on GitHub are public
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Fetch `module_file`, cache it as a dynamic module and return `class_name` from it."""
    module_file = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, module_file.replace(".py", ""))
266
"""Smoke tests for the digital_image_processing package (filters, edge detection, dithering, resize)."""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared fixtures: the small Lena image in BGR and grayscale.
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals kernel
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
266
1
"""Build whole-word-masking reference files for Chinese text (LTP word seg + BERT tokens)."""
import argparse
import json
from typing import List

from ltp import LTP

from transformers import BertTokenizer


def _is_chinese_char(cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    """Return 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    """Return the multi-character tokens of `tokens` that are entirely Chinese."""
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    """Prefix BERT sub-tokens with '##' when they continue a word from `chinese_word_set`."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Greedily try the longest possible word starting at `start`.
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """For each line, return the positions of BERT sub-tokens that continue a Chinese word."""
    ltp_res = []

    # Segment in chunks of 100 lines to bound memory.
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    """Read the input file, compute whole-word-masking refs and dump them as JSON lines."""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")

    args = parser.parse_args()
    main(args)
266
"""Tests for the Audio Spectrogram Transformer (AST) feature extractor."""
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given (rows, cols) shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    """Helper that holds the hyper-parameters and produces inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive input lengths so the batch spans [min, max).
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Return the kwargs used to construct the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of raw speech inputs (optionally equal-length / as numpy arrays)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        # Padding double-precision inputs must still return float32 features.
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
266
1
"""Convert a fairseq UniSpeech-SAT checkpoint into the HF Transformers format."""
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name -> HF parameter name (a `*` is replaced by the layer index).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}

# HF keys that live at the top level of the model (no "unispeech_sat." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the (possibly nested) HF parameter `key`, checking shapes first."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model):
    """Copy every tensor of `fairseq_model` into `hf_model`, logging any unused weights."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq `conv_layers.*` tensor into the HF feature extractor, shape-checking first."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                # NOTE: fixed `feature_extractor[layer_id]` -> `.conv_layers[layer_id]` so the
                # error message formats instead of raising TypeError.
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    # NOTE(review): `dict_path` is deliberately overwritten (as in the original script) —
    # the fairseq task is loaded without a dictionary directory.
    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
266
"""Solve a system of n simultaneous linear equations by Gaussian-style elimination."""


def simplify(current_set: list) -> list:
    """Normalise each row by its leading coefficient and subtract to eliminate the
    first variable, recursing on the remaining sub-system.

    Args:
        current_set: rows of coefficients, each ending with the constant term.

    Returns:
        The triangularised set of rows.
    """
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            # A zero leading coefficient cannot be normalised; keep the value.
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        # Re-attach the stripped first column and first row.
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list) -> list:
    """Solve n simultaneous linear equations given as n lists of n+1 numbers
    (coefficients followed by the constant term).

    Returns:
        The solution values, rounded to 5 decimal places, in variable order.

    Raises:
        IndexError: if the input is empty or rows have the wrong length.
        ValueError: if a row contains a non-numeric entry, or no row is free of zeros.
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    # Single equation: solve directly.
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Move a zero-free row to the front so normalisation never divides by zero first.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last (shortest) row upwards.
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
266
1
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=True , __UpperCamelCase="pt" ): """simple docstring""" __A = {'''add_prefix_space''': True} if isinstance(__UpperCamelCase , __UpperCamelCase ) and not line.startswith(''' ''' ) else {} __A = padding_side return tokenizer( [line] , max_length=__UpperCamelCase , padding='''max_length''' if pad_to_max_length else None , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase , ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , ): """simple docstring""" __A = input_ids.ne(__UpperCamelCase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Any, _lowerCamelCase : Tuple, _lowerCamelCase : int, _lowerCamelCase : Dict, _lowerCamelCase : Tuple="train", _lowerCamelCase : str=None, _lowerCamelCase : int=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : Optional[Any]="", ): '''simple docstring''' super().__init__() __A = Path(_lowerCamelCase ).joinpath(type_path + '''.source''' ) __A = Path(_lowerCamelCase ).joinpath(type_path + '''.target''' ) __A = self.get_char_lens(self.src_file ) __A = max_source_length __A = max_target_length assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}' __A = tokenizer __A = prefix if n_obs is not 
None: __A = self.src_lens[:n_obs] __A = src_lang __A = tgt_lang def __len__( self : Union[str, Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : Optional[Any], _lowerCamelCase : Optional[int] ): '''simple docstring''' __A = index + 1 # linecache starts at 1 __A = self.prefix + linecache.getline(str(self.src_file ), _lowerCamelCase ).rstrip('''\n''' ) __A = linecache.getline(str(self.tgt_file ), _lowerCamelCase ).rstrip('''\n''' ) assert source_line, f'empty source line for index {index}' assert tgt_line, f'empty tgt line for index {index}' # Need to add eos token manually for T5 if isinstance(self.tokenizer, _lowerCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __A = ( self.tokenizer.question_encoder if isinstance(self.tokenizer, _lowerCamelCase ) else self.tokenizer ) __A = self.tokenizer.generator if isinstance(self.tokenizer, _lowerCamelCase ) else self.tokenizer __A = encode_line(_lowerCamelCase, _lowerCamelCase, self.max_source_length, '''right''' ) __A = encode_line(_lowerCamelCase, _lowerCamelCase, self.max_target_length, '''right''' ) __A = source_inputs['''input_ids'''].squeeze() __A = target_inputs['''input_ids'''].squeeze() __A = source_inputs['''attention_mask'''].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ): '''simple docstring''' return [len(_lowerCamelCase ) for x in Path(_lowerCamelCase ).open().readlines()] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : Union[str, Any] ): '''simple docstring''' __A = torch.stack([x['''input_ids'''] for x in batch] ) __A = torch.stack([x['''attention_mask'''] for x in batch] ) __A = torch.stack([x['''decoder_input_ids'''] for x in batch] ) __A = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer, _lowerCamelCase ) else self.tokenizer.pad_token_id 
) __A = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer, _lowerCamelCase ) else self.tokenizer.pad_token_id ) __A = trim_batch(_lowerCamelCase, _lowerCamelCase ) __A , __A = trim_batch(_lowerCamelCase, _lowerCamelCase, attention_mask=_lowerCamelCase ) __A = { '''input_ids''': source_ids, '''attention_mask''': source_mask, '''decoder_input_ids''': y, } return batch lowercase_ = getLogger(__name__) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return list(itertools.chain.from_iterable(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = get_git_info() save_json(__UpperCamelCase , os.path.join(__UpperCamelCase , '''git_log.json''' ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=4 , **__UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''w''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase ) as f: return json.load(__UpperCamelCase ) def lowerCAmelCase ( ): """simple docstring""" __A = git.Repo(search_parent_directories=__UpperCamelCase ) __A = { '''repo_id''': str(__UpperCamelCase ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), '''hostname''': str(socket.gethostname() ), } return repo_infos def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return list(map(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''wb''' ) as f: return pickle.dump(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" def remove_articles(__UpperCamelCase ): return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , __UpperCamelCase ) def white_space_fix(__UpperCamelCase ): return " ".join(text.split() 
) def remove_punc(__UpperCamelCase ): __A = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__UpperCamelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = normalize_answer(__UpperCamelCase ).split() __A = normalize_answer(__UpperCamelCase ).split() __A = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase ) __A = sum(common.values() ) if num_same == 0: return 0 __A = 1.0 * num_same / len(__UpperCamelCase ) __A = 1.0 * num_same / len(__UpperCamelCase ) __A = (2 * precision * recall) / (precision + recall) return fa def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" assert len(__UpperCamelCase ) == len(__UpperCamelCase ) __A = 0 for hypo, pred in zip(__UpperCamelCase , __UpperCamelCase ): em += exact_match_score(__UpperCamelCase , __UpperCamelCase ) if len(__UpperCamelCase ) > 0: em /= len(__UpperCamelCase ) return {"em": em} def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return model_prefix.startswith('''rag''' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __A = '''dropout_rate''' for p in extra_params: if getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if not hasattr(__UpperCamelCase , __UpperCamelCase ) and not hasattr(__UpperCamelCase , equivalent_param[p] ): logger.info('''config doesn\'t have a `{}` attribute'''.format(__UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) continue __A = p if hasattr(__UpperCamelCase , __UpperCamelCase ) else equivalent_param[p] 
setattr(__UpperCamelCase , __UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) delattr(__UpperCamelCase , __UpperCamelCase ) return hparams, config
266
"""simple docstring""" from __future__ import annotations from typing import Any def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if not postfix_notation: return 0 __A = {'''+''', '''-''', '''*''', '''/'''} __A = [] for token in postfix_notation: if token in operations: __A , __A = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(__UpperCamelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
266
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer lowercase_ = logging.get_logger(__name__) lowercase_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowercase_ = { 'vocab_file': { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt', }, 'tokenizer_file': { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json' ), 'google/realm-cc-news-pretrained-openqa': ( 
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json' ), 'google/realm-orqa-nq-openqa': ( 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json' ), 'google/realm-orqa-nq-reader': ( 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json' ), 'google/realm-orqa-wq-openqa': ( 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json' ), 'google/realm-orqa-wq-reader': ( 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json' ), }, } lowercase_ = { 'google/realm-cc-news-pretrained-embedder': 512, 'google/realm-cc-news-pretrained-encoder': 512, 'google/realm-cc-news-pretrained-scorer': 512, 'google/realm-cc-news-pretrained-openqa': 512, 'google/realm-orqa-nq-openqa': 512, 'google/realm-orqa-nq-reader': 512, 'google/realm-orqa-wq-openqa': 512, 'google/realm-orqa-wq-reader': 512, } lowercase_ = { 'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True}, 'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True}, 'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True}, 'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True}, 'google/realm-orqa-nq-openqa': {'do_lower_case': True}, 'google/realm-orqa-nq-reader': {'do_lower_case': True}, 'google/realm-orqa-wq-openqa': {'do_lower_case': True}, 'google/realm-orqa-wq-reader': {'do_lower_case': True}, } class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Optional[int] = VOCAB_FILES_NAMES A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP A_ : Tuple = PRETRAINED_INIT_CONFIGURATION A_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : str = RealmTokenizer def __init__( self : List[Any], _lowerCamelCase : List[str]=None, _lowerCamelCase : Dict=None, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]="[UNK]", _lowerCamelCase : Tuple="[SEP]", _lowerCamelCase : Union[str, Any]="[PAD]", _lowerCamelCase : Tuple="[CLS]", 
_lowerCamelCase : List[Any]="[MASK]", _lowerCamelCase : Optional[int]=True, _lowerCamelCase : Tuple=None, **_lowerCamelCase : Any, ): '''simple docstring''' super().__init__( _lowerCamelCase, tokenizer_file=_lowerCamelCase, do_lower_case=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, pad_token=_lowerCamelCase, cls_token=_lowerCamelCase, mask_token=_lowerCamelCase, tokenize_chinese_chars=_lowerCamelCase, strip_accents=_lowerCamelCase, **_lowerCamelCase, ) __A = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''', _lowerCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''', _lowerCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''', _lowerCamelCase ) != tokenize_chinese_chars ): __A = getattr(_lowerCamelCase, normalizer_state.pop('''type''' ) ) __A = do_lower_case __A = strip_accents __A = tokenize_chinese_chars __A = normalizer_class(**_lowerCamelCase ) __A = do_lower_case def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Tuple, **_lowerCamelCase : Optional[Any] ): '''simple docstring''' __A = PaddingStrategy.MAX_LENGTH __A = text __A = kwargs.pop('''text_pair''', _lowerCamelCase ) __A = kwargs.pop('''return_tensors''', _lowerCamelCase ) __A = { '''input_ids''': [], '''attention_mask''': [], '''token_type_ids''': [], } for idx, candidate_text in enumerate(_lowerCamelCase ): if batch_text_pair is not None: __A = batch_text_pair[idx] else: __A = None __A = super().__call__(_lowerCamelCase, _lowerCamelCase, return_tensors=_lowerCamelCase, **_lowerCamelCase ) __A = encoded_candidates.get('''input_ids''' ) __A = encoded_candidates.get('''attention_mask''' ) __A = encoded_candidates.get('''token_type_ids''' ) if encoded_input_ids is not None: output_data["input_ids"].append(_lowerCamelCase ) if encoded_attention_mask is not None: output_data["attention_mask"].append(_lowerCamelCase ) if encoded_token_type_ids is not None: 
output_data["token_type_ids"].append(_lowerCamelCase ) __A = {key: item for key, item in output_data.items() if len(_lowerCamelCase ) != 0} return BatchEncoding(_lowerCamelCase, tensor_type=_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Tuple, _lowerCamelCase : Any=None ): '''simple docstring''' __A = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' __A = self._tokenizer.model.save(_lowerCamelCase, name=_lowerCamelCase ) return tuple(_lowerCamelCase )
266
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple, _lowerCamelCase : List[str]=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Optional[int]=True, _lowerCamelCase : int=True, _lowerCamelCase : List[str]=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : int=99, _lowerCamelCase : Optional[int]=32, _lowerCamelCase : Tuple=5, _lowerCamelCase : Tuple=4, _lowerCamelCase : str=37, _lowerCamelCase : Union[str, Any]="gelu", _lowerCamelCase : int=0.1, _lowerCamelCase : List[Any]=0.1, _lowerCamelCase : Dict=5_12, _lowerCamelCase : List[Any]=16, _lowerCamelCase : Any=2, _lowerCamelCase : Any=0.02, _lowerCamelCase : Dict=4, ): '''simple docstring''' __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_attention_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_choices def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __A = None if self.use_attention_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A 
= None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) __A = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self.prepare_config_and_inputs() __A , __A , __A , __A = config_and_inputs __A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Dict = True A_ : Tuple = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = FlaxRoFormerModelTester(self ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' for model_class_name in self.all_model_classes: __A = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''', from_pt=_lowerCamelCase ) __A = model(np.ones((1, 1) ) ) self.assertIsNotNone(_lowerCamelCase ) @require_flax class snake_case ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) __A = jnp.array([[0, 1, 2, 3, 4, 5]] ) 
__A = model(_lowerCamelCase )[0] __A = 5_00_00 __A = (1, 6, vocab_size) self.assertEqual(output.shape, _lowerCamelCase ) __A = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3], _lowerCamelCase, atol=1e-4 ) )
266
1
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def lowerCAmelCase ( __UpperCamelCase=None ): """simple docstring""" if subparsers is not None: __A = subparsers.add_parser('''env''' ) else: __A = argparse.ArgumentParser('''Accelerate env command''' ) parser.add_argument( '''--config_file''' , default=__UpperCamelCase , help='''The config file to use for the default values in the launching script.''' ) if subparsers is not None: parser.set_defaults(func=__UpperCamelCase ) return parser def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = torch.__version__ __A = torch.cuda.is_available() __A = is_xpu_available() __A = is_npu_available() __A = '''Not found''' # Get the default from the config file. 
if args.config_file is not None or os.path.isfile(__UpperCamelCase ): __A = load_config_from_file(args.config_file ).to_dict() __A = { '''`Accelerate` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Numpy version''': np.__version__, '''PyTorch version (GPU?)''': f'{pt_version} ({pt_cuda_available})', '''PyTorch XPU available''': str(__UpperCamelCase ), '''PyTorch NPU available''': str(__UpperCamelCase ), '''System RAM''': f'{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB', } if pt_cuda_available: __A = torch.cuda.get_device_name() print('''\nCopy-and-paste the text below in your GitHub issue\n''' ) print('''\n'''.join([f'- {prop}: {val}' for prop, val in info.items()] ) ) print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' ) __A = ( '''\n'''.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else f'\t{accelerate_config}' ) print(__UpperCamelCase ) __A = accelerate_config return info def lowerCAmelCase ( ): """simple docstring""" __A = env_command_parser() __A = parser.parse_args() env_command(__UpperCamelCase ) return 0 if __name__ == "__main__": raise SystemExit(main())
266
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def lowerCAmelCase ( __UpperCamelCase = 1_0_0_0_0_0_0 , __UpperCamelCase = 1_0 ): """simple docstring""" __A = defaultdict(__UpperCamelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __A = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __A = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__UpperCamelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 1_0 ) if __name__ == "__main__": print(F'''{solution() = }''')
266
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : int = "realm" def __init__( self : Optional[int], _lowerCamelCase : List[Any]=3_05_22, _lowerCamelCase : Optional[int]=7_68, _lowerCamelCase : List[Any]=1_28, _lowerCamelCase : str=12, _lowerCamelCase : int=12, _lowerCamelCase : int=8, _lowerCamelCase : Any=30_72, _lowerCamelCase : Any="gelu_new", _lowerCamelCase : List[str]=0.1, _lowerCamelCase : Optional[int]=0.1, _lowerCamelCase : int=5_12, _lowerCamelCase : str=2, _lowerCamelCase : int=0.02, _lowerCamelCase : Optional[Any]=1e-12, _lowerCamelCase : str=2_56, _lowerCamelCase : Dict=10, _lowerCamelCase : List[Any]=1e-3, _lowerCamelCase : Optional[Any]=5, _lowerCamelCase : str=3_20, _lowerCamelCase : int=13_35_37_18, _lowerCamelCase : Any=50_00, 
_lowerCamelCase : Dict=1, _lowerCamelCase : Union[str, Any]=0, _lowerCamelCase : Any=2, **_lowerCamelCase : List[Any], ): '''simple docstring''' super().__init__(pad_token_id=_lowerCamelCase, bos_token_id=_lowerCamelCase, eos_token_id=_lowerCamelCase, **_lowerCamelCase ) # Common config __A = vocab_size __A = max_position_embeddings __A = hidden_size __A = retriever_proj_size __A = num_hidden_layers __A = num_attention_heads __A = num_candidates __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = initializer_range __A = type_vocab_size __A = layer_norm_eps # Reader config __A = span_hidden_size __A = max_span_width __A = reader_layer_norm_eps __A = reader_beam_size __A = reader_seq_len # Retrieval config __A = num_block_records __A = searcher_beam_size
266
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : Optional[int]=2, _lowerCamelCase : Optional[int]=3, _lowerCamelCase : int=64, _lowerCamelCase : List[str]=None ): '''simple docstring''' __A = np.random.default_rng(_lowerCamelCase ) __A = length __A = rng.normal(size=(length,) ).astype(np.floataa ) __A = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa ) def __len__( self : str ): '''simple docstring''' return self.length def __getitem__( self : Dict, _lowerCamelCase : Optional[int] ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple=0, _lowerCamelCase : Any=0, _lowerCamelCase : Optional[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[Any]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a[0] + self.b[0] class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : str, _lowerCamelCase : Optional[Any]=0, _lowerCamelCase : Any=0, _lowerCamelCase : List[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}' ) __A = False return x * self.a + self.b def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1_6 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __A = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __A = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} __A = load_dataset('''csv''' , data_files=__UpperCamelCase ) __A = datasets['''train'''].unique('''label''' ) __A = {v: i for i, v in enumerate(__UpperCamelCase )} def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) if "label" in examples: __A = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __A = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __A = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=2 ) __A = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
266
1
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = os.path.join(args.tf_model_dir , '''parameters.json''' ) __A = json.loads(open(__UpperCamelCase ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith('''.pt''' ): __A = args.output + '''.pt''' __A = OrderedDict() with tf.device('''/CPU:0''' ): __A = tf.train.load_checkpoint(args.tf_model_dir ) __A = reader.get_variable_to_shape_map() for key_name in shapes.keys(): __A = reader.get_tensor(__UpperCamelCase ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): __A = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): __A = 8 __A = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time __A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __A = torch.tensor(__UpperCamelCase ) elif key_name.startswith('''model/moe''' ): __A = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): __A = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player __A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __A = torch.tensor(__UpperCamelCase ) elif key_name.endswith('''/softmlp/kernel''' ): __A = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player __A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __A = torch.tensor(__UpperCamelCase ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): __A = key_name[-9:-7] for i in range(1_6 ): __A = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % 
(player, i, nlayer) __A = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided __A = torch.tensor(__UpperCamelCase ) elif key_name.startswith('''model/mlp''' ): __A = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): __A = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player __A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __A = torch.tensor(__UpperCamelCase ) elif key_name.endswith('''/p1/bias''' ): __A = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player __A = vnp.copy() # same because it is one dimensional __A = torch.tensor(__UpperCamelCase ) elif key_name.endswith('''/p2/kernel''' ): __A = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player __A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __A = torch.tensor(__UpperCamelCase ) elif key_name.endswith('''/p2/bias''' ): __A = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player __A = vnp.copy() # same because it is one dimensional __A = torch.tensor(__UpperCamelCase ) elif key_name.startswith('''model/ln''' ): __A = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): __A = '''model.blocks.%d.feed_forward.norm.bias''' % player __A = vnp.copy() # same because it is one dimensional __A = torch.tensor(__UpperCamelCase ) elif key_name.endswith('''/g''' ): __A = '''model.blocks.%d.feed_forward.norm.weight''' % player __A = vnp.copy() # same because it is one dimensional __A = torch.tensor(__UpperCamelCase ) elif key_name.startswith('''model/att''' ): __A = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): __A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum __A = state[:, 0, :, :] __A = state[:, 1, :, :] __A = state[:, 2, :, :] __A = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __A = ( 
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __A = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __A = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player __A = torch.tensor(__UpperCamelCase ) __A = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player __A = torch.tensor(__UpperCamelCase ) __A = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player __A = torch.tensor(__UpperCamelCase ) elif key_name.endswith('''/o/kernel''' ): __A = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player __A = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix __A = torch.tensor(__UpperCamelCase ) elif key_name.startswith('''model/an''' ): __A = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): __A = '''model.blocks.%d.self_attn.norm.bias''' % player __A = vnp.copy() # same because it is one dimensional __A = torch.tensor(__UpperCamelCase ) elif key_name.endswith('''/g''' ): __A = '''model.blocks.%d.self_attn.norm.weight''' % player __A = vnp.copy() # same because it is one dimensional __A = torch.tensor(__UpperCamelCase ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): __A = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] __A = '''model.%s.weight''' % nlayer __A = vnp.copy() # same in embedded __A = torch.tensor(__UpperCamelCase ) if key_name.startswith('''model/wte''' ): __A = '''lm_head.weight''' __A = vnp.copy() # same in embedded __A = torch.tensor(__UpperCamelCase ) elif key_name.startswith('''model/wob''' ): __A = '''final_logits_bias''' __A = vnp.copy() # same in embedded __A = 
state.reshape((1, -1) ) __A = torch.tensor(__UpperCamelCase ) elif key_name == "model/dense/kernel": __A = '''model.last_project.weight''' __A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __A = torch.tensor(__UpperCamelCase ) elif key_name == "model/dense_1/bias": __A = '''model.last_project.bias''' __A = vnp.copy() # same because it is one dimensional __A = torch.tensor(__UpperCamelCase ) torch.save(__UpperCamelCase , args.output ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser( description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model') parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model') lowercase_ = parser.parse_args() convert_tf_gptsan_to_pt(args)
266
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowercase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. 
According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowercase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... 
\'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ), } ), ) def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[List[List[str]]], _lowerCamelCase : List[List[str]], _lowerCamelCase : int = 1, _lowerCamelCase : int = 4, ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCamelCase, hypotheses=_lowerCamelCase, min_len=_lowerCamelCase, max_len=_lowerCamelCase ) }
266
1
"""simple docstring""" from __future__ import annotations def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = [] __A , __A = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) __A = result + left + right return input_list def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" if len(__UpperCamelCase ) <= 1: return input_list __A = list(__UpperCamelCase ) # iteration for two-way merging __A = 2 while p <= len(__UpperCamelCase ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase ): __A = i __A = i + p - 1 __A = (low + high + 1) // 2 __A = merge(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # final merge of last two parts if p * 2 >= len(__UpperCamelCase ): __A = i __A = merge(__UpperCamelCase , 0 , __UpperCamelCase , len(__UpperCamelCase ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": lowercase_ = input('Enter numbers separated by a comma:\n').strip() if user_input == "": lowercase_ = [] else: lowercase_ = [int(item.strip()) for item in user_input.split(',')] print(iter_merge_sort(unsorted))
266
"""simple docstring""" class snake_case : '''simple docstring''' def __init__( self : List[str], _lowerCamelCase : list[int] ): '''simple docstring''' __A = len(_lowerCamelCase ) __A = [0] * len_array if len_array > 0: __A = array[0] for i in range(1, _lowerCamelCase ): __A = self.prefix_sum[i - 1] + array[i] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : int, _lowerCamelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : int ): '''simple docstring''' __A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(_lowerCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
266
1
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def lowerCAmelCase ( ): """simple docstring""" __A = {} __A = 2 while True: __A = factor_map.pop(__UpperCamelCase , __UpperCamelCase ) if factor: __A = factor + prime while x in factor_map: x += factor __A = factor else: __A = prime yield prime prime += 1 def lowerCAmelCase ( __UpperCamelCase = 1e1_0 ): """simple docstring""" __A = sieve() __A = 1 while True: __A = next(__UpperCamelCase ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(__UpperCamelCase ) n += 2 if __name__ == "__main__": print(solution())
266
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowercase_ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowercase_ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCAmelCase ( ): """simple docstring""" __A = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __A = bs[:] __A = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCamelCase ) cs.append(2**8 + n ) n += 1 __A = [chr(__UpperCamelCase ) for n in cs] return dict(zip(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = set() __A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __A = char return pairs class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Tuple = VOCAB_FILES_NAMES A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict, _lowerCamelCase : Optional[Any], _lowerCamelCase : List[str], _lowerCamelCase : Dict="replace", _lowerCamelCase : 
Any="<s>", _lowerCamelCase : Optional[int]="</s>", _lowerCamelCase : Dict="</s>", _lowerCamelCase : List[Any]="<s>", _lowerCamelCase : List[str]="<unk>", _lowerCamelCase : str="<pad>", _lowerCamelCase : Any="<mask>", _lowerCamelCase : Any=False, **_lowerCamelCase : Tuple, ): '''simple docstring''' __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else bos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else eos_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else sep_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else cls_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else unk_token __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(_lowerCamelCase, lstrip=_lowerCamelCase, rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase, _lowerCamelCase ) else mask_token super().__init__( errors=_lowerCamelCase, bos_token=_lowerCamelCase, eos_token=_lowerCamelCase, unk_token=_lowerCamelCase, sep_token=_lowerCamelCase, cls_token=_lowerCamelCase, pad_token=_lowerCamelCase, mask_token=_lowerCamelCase, add_prefix_space=_lowerCamelCase, **_lowerCamelCase, ) with open(_lowerCamelCase, encoding='''utf-8''' ) as vocab_handle: __A = json.load(_lowerCamelCase ) __A = {v: k for k, v in self.encoder.items()} __A = errors # how to handle errors in decoding __A = bytes_to_unicode() __A = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCamelCase, encoding='''utf-8''' ) as merges_handle: __A = merges_handle.read().split('''\n''' )[1:-1] __A = [tuple(merge.split() ) for merge in bpe_merges] __A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) ) __A = {} __A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder, **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] __A = tuple(_lowerCamelCase ) __A = get_pairs(_lowerCamelCase ) if not pairs: return token while True: __A = min(_lowerCamelCase, key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __A , 
__A = bigram __A = [] __A = 0 while i < len(_lowerCamelCase ): try: __A = word.index(_lowerCamelCase, _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __A = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __A = tuple(_lowerCamelCase ) __A = new_word if len(_lowerCamelCase ) == 1: break else: __A = get_pairs(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = word return word def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Dict ): '''simple docstring''' __A = [] for token in re.findall(self.pat, _lowerCamelCase ): __A = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) ) return bpe_tokens def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict ): '''simple docstring''' return self.encoder.get(_lowerCamelCase, self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : Any ): '''simple docstring''' return self.decoder.get(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' __A = ''''''.join(_lowerCamelCase ) __A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors ) return text def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : str, _lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __A = os.path.join( _lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join( _lowerCamelCase, 
(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=_lowerCamelCase, ensure_ascii=_lowerCamelCase ) + '''\n''' ) __A = 0 with open(_lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda _lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ''' Please check that the tokenizer is not corrupted!''' ) __A = token_index writer.write(''' '''.join(_lowerCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None, _lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase, token_ids_a=_lowerCamelCase, already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str]=False, **_lowerCamelCase : List[Any] ): '''simple docstring''' __A = kwargs.pop('''add_prefix_space''', self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()): __A = ''' ''' + text 
return (text, kwargs) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : List[int], _lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ): '''simple docstring''' __A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __A = ''' '''.join(_lowerCamelCase ) __A = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __A = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
266
1
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = OmegaConf.load(__UpperCamelCase ) __A = torch.load(__UpperCamelCase , map_location='''cpu''' )['''model'''] __A = list(state_dict.keys() ) # extract state_dict for VQVAE __A = {} __A = '''first_stage_model.''' for key in keys: if key.startswith(__UpperCamelCase ): __A = state_dict[key] # extract state_dict for UNetLDM __A = {} __A = '''model.diffusion_model.''' for key in keys: if key.startswith(__UpperCamelCase ): __A = state_dict[key] __A = config.model.params.first_stage_config.params __A = config.model.params.unet_config.params __A = VQModel(**__UpperCamelCase ).eval() vqvae.load_state_dict(__UpperCamelCase ) __A = UNetLDMModel(**__UpperCamelCase ).eval() unet.load_state_dict(__UpperCamelCase ) __A = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__UpperCamelCase , ) __A = LDMPipeline(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) pipeline.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) lowercase_ = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
266
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging lowercase_ = ( 'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py' ) lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCAmelCase ( ): """simple docstring""" __A = '''https://pypi.org/pypi/diffusers/json''' __A = json.loads(request.urlopen(__UpperCamelCase ).read() )['''releases'''].keys() return sorted(__UpperCamelCase , key=lambda __UpperCamelCase : version.Version(__UpperCamelCase ) ) def lowerCAmelCase ( ): """simple docstring""" if HF_MODULES_CACHE in sys.path: return sys.path.append(__UpperCamelCase ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = Path(__UpperCamelCase ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" init_hf_modules() __A = Path(__UpperCamelCase ) / name # If the parent module does not exist yet, recursively create it. 
if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import .xxx` __A = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = False __A = [module_file] __A = [] # Let's recurse through all relative imports while not no_change: __A = [] for f in files_to_check: new_imports.extend(get_relative_imports(__UpperCamelCase ) ) __A = Path(__UpperCamelCase ).parent __A = [str(module_path / m ) for m in new_imports] __A = [f for f in new_import_files if f not in all_relative_imports] __A = [f'{f}.py' for f in new_import_files] __A = len(__UpperCamelCase ) == 0 all_relative_imports.extend(__UpperCamelCase ) return all_relative_imports def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import xxx` __A = re.findall('''^\s*import\s+(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Only keep the top-level module __A = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all __A = list(set(__UpperCamelCase ) ) __A = [] for imp in imports: try: importlib.import_module(__UpperCamelCase ) except ImportError: 
missing_packages.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' f'{", ".join(__UpperCamelCase )}. Run `pip install {" ".join(__UpperCamelCase )}`' ) return get_relative_imports(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = module_path.replace(os.path.sep , '''.''' ) __A = importlib.import_module(__UpperCamelCase ) if class_name is None: return find_pipeline_class(__UpperCamelCase ) return getattr(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" from ..pipelines import DiffusionPipeline __A = dict(inspect.getmembers(__UpperCamelCase , inspect.isclass ) ) __A = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , __UpperCamelCase ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' f' {loaded_module}.' 
) __A = cls return pipeline_class def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ): """simple docstring""" __A = str(__UpperCamelCase ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): __A = module_file_or_url __A = '''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: __A = get_diffusers_versions() # cut ".dev0" __A = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: __A = latest_version if latest_version[1:] in available_versions else '''main''' logger.info(f'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: __A = f'v{revision}' elif revision == "main": __A = revision else: raise ValueError( f'`custom_revision`: {revision} does not exist. Please make sure to choose one of' f' {", ".join(available_versions + ["main"] )}.' ) # community pipeline on GitHub __A = COMMUNITY_PIPELINES_URL.format(revision=__UpperCamelCase , pipeline=__UpperCamelCase ) try: __A = cached_download( __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = '''git''' __A = pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' 
) raise else: try: # Load from URL or cache if already cached __A = hf_hub_download( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment __A = check_imports(__UpperCamelCase ) # Now we move the module inside our cached dynamic modules. __A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(__UpperCamelCase ) __A = Path(__UpperCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(__UpperCamelCase , submodule_path / module_file ) for module_needed in modules_needed: __A = f'{module_needed}.py' shutil.copy(os.path.join(__UpperCamelCase , __UpperCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(__UpperCamelCase , __UpperCamelCase ): __A = use_auth_token elif use_auth_token is True: __A = HfFolder.get_token() else: __A = None __A = model_info(__UpperCamelCase , revision=__UpperCamelCase , token=__UpperCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
__A = submodule_path / commit_hash __A = full_submodule + os.path.sep + commit_hash create_dynamic_module(__UpperCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(__UpperCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( __UpperCamelCase , f'{module_needed}.py' , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return os.path.join(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , **__UpperCamelCase , ): """simple docstring""" __A = get_cached_module_file( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return get_class_in_module(__UpperCamelCase , final_module.replace('''.py''' , '''''' ) )
266
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ): """simple docstring""" __A = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler 
rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __A = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: __A = '''''' else: __A = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __A = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) __A = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __A = in_proj_weight[ : config.hidden_size, : ] __A = in_proj_bias[: config.hidden_size] __A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __A = in_proj_weight[ -config.hidden_size :, : ] __A = in_proj_bias[-config.hidden_size :] def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = dct.pop(__UpperCamelCase ) __A = val def lowerCAmelCase ( ): """simple docstring""" __A = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __A = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=True ): """simple 
docstring""" __A = ViTConfig() # patch_size if model_name[-1] == "8": __A = 8 # set labels if required if not base_model: __A = 1_0_0_0 __A = '''huggingface/label-files''' __A = '''imagenet-1k-id2label.json''' __A = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) __A = {int(__UpperCamelCase ): v for k, v in idalabel.items()} __A = idalabel __A = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: __A = 3_8_4 __A = 1_5_3_6 __A = 1_2 __A = 6 # load original model from torch hub __A = torch.hub.load('''facebookresearch/dino:main''' , __UpperCamelCase ) original_model.eval() # load state_dict of original model, remove and rename some keys __A = original_model.state_dict() if base_model: remove_classification_head_(__UpperCamelCase ) __A = create_rename_keys(__UpperCamelCase , base_model=__UpperCamelCase ) for src, dest in rename_keys: rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # load HuggingFace model if base_model: __A = ViTModel(__UpperCamelCase , add_pooling_layer=__UpperCamelCase ).eval() else: __A = ViTForImageClassification(__UpperCamelCase ).eval() model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by ViTImageProcessor __A = ViTImageProcessor() __A = image_processor(images=prepare_img() , return_tensors='''pt''' ) __A = encoding['''pixel_values'''] __A = model(__UpperCamelCase ) if base_model: __A = original_model(__UpperCamelCase ) assert torch.allclose(__UpperCamelCase , outputs.last_hidden_state[:, 0, :] , atol=1e-1 ) else: __A = original_model(__UpperCamelCase ) assert logits.shape == outputs.logits.shape assert torch.allclose(__UpperCamelCase , outputs.logits , atol=1e-3 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) 
model.save_pretrained(__UpperCamelCase ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) lowercase_ = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
266
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class snake_case ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ): __A = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sgugger/tiny-distilbert-classification''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], 
multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) # set architectures equal to `None` __A = None __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple 
docstring''' __A = '''sshleifer/tiny-gpt2''' __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) 
self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = '''sshleifer/tinier_bart''' __A = AutoConfig.from_pretrained(_lowerCamelCase ) __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase, configs=[config] ) __A = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, save_to_csv=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ), inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ), env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ), multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' __A = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ): 
self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) ) self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __A = PyTorchBenchmarkArguments( models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ), log_print=_lowerCamelCase, trace_memory_line_by_line=_lowerCamelCase, multi_process=_lowerCamelCase, ) __A = PyTorchBenchmark(_lowerCamelCase ) __A = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
266
1
"""simple docstring""" # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys lowercase_ = '3' print('Python version:', sys.version) print('OS platform:', platform.platform()) print('OS architecture:', platform.machine()) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) except ImportError: print('Torch version:', None) try: import transformers print('transformers version:', transformers.__version__) except ImportError: print('transformers version:', None)
266
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = PegasusTokenizer A_ : int = PegasusTokenizerFast A_ : Optional[Any] = True A_ : Union[str, Any] = True def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def _SCREAMING_SNAKE_CASE ( self : int, **_lowerCamelCase : List[Any] ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ): '''simple docstring''' return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = '''</s>''' __A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ), _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ), _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' __A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<pad>''' ) self.assertEqual(vocab_keys[1], '''</s>''' ) self.assertEqual(vocab_keys[-1], '''v''' ) self.assertEqual(len(_lowerCamelCase ), 11_03 ) def _SCREAMING_SNAKE_CASE ( self : int ): '''simple docstring''' 
self.assertEqual(self.get_tokenizer().vocab_size, 11_03 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word __A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' __A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_61_03 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_03 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 10_24 __A = '''To ensure a smooth flow of bank resolutions.''' __A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] __A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def 
_SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 1_50, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 10_24) assert batch.attention_mask.shape == (2, 10_24) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' # fmt: off __A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase, model_name='''google/bigbird-pegasus-large-arxiv''', revision='''ba85d0851d708441f91440d509690f1ab6353415''', ) @require_sentencepiece @require_tokenizers class snake_case ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' A_ : str = PegasusTokenizer A_ : Union[str, Any] = PegasusTokenizerFast A_ : Any = True A_ : str = True def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __A = PegasusTokenizer(_lowerCamelCase, offset=0, mask_token_sent=_lowerCamelCase, mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int], **_lowerCamelCase : Dict ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase ) def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ): '''simple docstring''' return ("This 
is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __A = self.tokenizer_class.from_pretrained(self.tmpdirname ) __A = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) __A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] __A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase, _lowerCamelCase ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ['''This is going to be way too long.''' * 10_00, '''short example'''] __A = ['''not super long but more than 5 tokens''', '''tiny'''] __A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) __A = self._large_tokenizer( text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 40_96) assert batch.attention_mask.shape == (2, 40_96) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) __A = self._large_tokenizer(_lowerCamelCase ).input_ids self.assertListEqual( _lowerCamelCase, [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1], )
266
1
"""Release utility: bump version strings across the repo before/after a release.

Rewrites the version in ``src/diffusers/__init__.py``, ``setup.py`` and (for
non-patch releases) every example script, driven by the regexes in
``REPLACE_PATTERNS``.  Run with ``--post_release`` after tagging to move back
to a ``.dev0`` version.
"""
import argparse
import os
import re

import packaging.version

PATH_TO_EXAMPLES = "examples/"
# pattern name -> (compiled regex locating the version line, replacement template)
# "VERSION" inside the template is substituted with the target version string.
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# Files whose version line is rewritten on every release.
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version in `fname` to `version` using REPLACE_PATTERNS[pattern].

    Args:
        fname: path of the file to rewrite in place.
        version: new version string (substituted for "VERSION" in the template).
        pattern: key into ``REPLACE_PATTERNS`` selecting regex + template.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the ``check_min_version`` pin in every ``.py`` file under examples/."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Skip folders with non-actively-maintained examples.
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere; examples are skipped for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """In the README model list, turn ``main`` doc links into stable doc links."""
    # Sentinels delimiting the auto-generated model list in the README.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package __init__ as a parsed Version."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Compute the release version, confirm it interactively, and apply it."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Move the repo back to the next ``.dev0`` version after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
266
"""Convert free-form strings between pascal, camel, snake and kebab case."""
import re


def split_input(str_):
    """Split `str_` into word groups: first on punctuation, then on whitespace.

    Returns a list of lists of words, one inner list per punctuation-free
    segment of the input.
    """
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_):
    """Return `str_` with every word capitalized and all separators removed."""
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text, upper, separator):
    """Join the words of `text` with `separator`, upper- or lower-cased.

    Args:
        text: input string to convert.
        upper: if True every word is upper-cased, otherwise lower-cased.
        separator: string placed between words (e.g. "_" or "-").

    Returns:
        The converted string, or "not valid string" on empty/invalid input.
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text):
    """Convert `text` to PascalCase."""
    return to_simple_case(text)


def to_camel_case(text):
    """Convert `text` to camelCase, or "not valid string" on empty input."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text, upper):
    """Convert `text` to snake_case (SCREAMING_SNAKE_CASE if `upper`)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text, upper):
    """Convert `text` to kebab-case (UPPER-KEBAB-CASE if `upper`)."""
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
266
1