code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' debug_launcher(test_script.main ) def a ( self ): '''simple docstring''' debug_launcher(test_ops.main )
444
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ): # picklable for multiprocessing return x.sum() def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ): # picklable for multiprocessing return i + 1 @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 class lowerCAmelCase__ ( a): '''simple docstring''' def _lowerCamelCase ( self) -> Optional[Any]: _A : str = {} _A : Optional[int] = [] _A : Optional[int] = 1 _A : Any = [1, 2] _A : Optional[Any] = {"a": 1, "b": 2} _A : int = {"a": [1, 2], "b": [3, 4]} _A : int = {"a": {"1": 1}, "b": 2} _A : List[str] = {"a": 1, "b": 2, "c": 3, "d": 4} _A : int = {} _A : List[Any] = [] _A : List[str] = 2 _A : Optional[int] = [2, 3] _A : Dict = {"a": 2, "b": 3} _A : List[Any] = {"a": [2, 3], "b": [4, 5]} _A : str = {"a": {"1": 2}, "b": 3} _A : Optional[Any] = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase) , __lowerCamelCase) _A : Optional[Any] = 2 
self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase) , __lowerCamelCase) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase) , __lowerCamelCase) _A : str = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)} _A : Optional[int] = {"a": 2, "b": 0, "c": 2} _A : Optional[Any] = { "a": np.eye(2).astype(__lowerCamelCase), "b": np.zeros(3).astype(__lowerCamelCase), "c": np.ones(2).astype(__lowerCamelCase), } self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , map_numpy=__lowerCamelCase) , __lowerCamelCase) self.assertEqual( {k: v.tolist() for k, v in map_nested(__lowerCamelCase , __lowerCamelCase , map_numpy=__lowerCamelCase).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(__lowerCamelCase , __lowerCamelCase , map_numpy=__lowerCamelCase , num_proc=__lowerCamelCase) , __lowerCamelCase) self.assertEqual( {k: v.tolist() for k, v in map_nested(__lowerCamelCase , __lowerCamelCase , map_numpy=__lowerCamelCase , num_proc=__lowerCamelCase).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(__lowerCamelCase): # can't pickle a local lambda map_nested(lambda __lowerCamelCase: x + 1 , __lowerCamelCase , 
num_proc=__lowerCamelCase) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Tuple = {"a": 1, "b": 2} _A : Any = {"a": 3, "b": 4} _A : int = {"a": 5, "b": 6} _A : int = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))]) self.assertEqual(sorted(zip_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)) , __lowerCamelCase) def _lowerCamelCase ( self) -> int: class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = "bar" _A : List[str] = Foo() self.assertEqual(foo.my_attr , "bar") with temporary_assignment(__lowerCamelCase , "my_attr" , "BAR"): self.assertEqual(foo.my_attr , "BAR") self.assertEqual(foo.my_attr , "bar") @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc" , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ): with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: _A : List[Any] = {f"{i}": i for i in range(UpperCamelCase__ )} _A : Tuple = map_nested(lambda UpperCamelCase__ : x + 10 , UpperCamelCase__ , num_proc=UpperCamelCase__ , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class lowerCAmelCase__ ( a): '''simple docstring''' @require_tf def _lowerCamelCase ( self) -> int: import tensorflow as tf from tensorflow.keras import layers _A : Optional[int] = layers.Dense(2) def gen_random_output(): _A : Optional[Any] = tf.random.uniform((1, 3)) return model(__lowerCamelCase).numpy() with temp_seed(4_2 , set_tensorflow=__lowerCamelCase): _A : Dict = gen_random_output() with 
temp_seed(4_2 , set_tensorflow=__lowerCamelCase): _A : str = gen_random_output() _A : Tuple = gen_random_output() np.testing.assert_equal(__lowerCamelCase , __lowerCamelCase) self.assertGreater(np.abs(outa - outa).sum() , 0) @require_torch def _lowerCamelCase ( self) -> Tuple: import torch def gen_random_output(): _A : List[str] = torch.nn.Linear(3 , 2) _A : List[str] = torch.rand(1 , 3) return model(__lowerCamelCase).detach().numpy() with temp_seed(4_2 , set_pytorch=__lowerCamelCase): _A : Optional[Any] = gen_random_output() with temp_seed(4_2 , set_pytorch=__lowerCamelCase): _A : List[str] = gen_random_output() _A : Dict = gen_random_output() np.testing.assert_equal(__lowerCamelCase , __lowerCamelCase) self.assertGreater(np.abs(outa - outa).sum() , 0) def _lowerCamelCase ( self) -> int: def gen_random_output(): return np.random.rand(1 , 3) with temp_seed(4_2): _A : List[str] = gen_random_output() with temp_seed(4_2): _A : List[str] = gen_random_output() _A : Optional[int] = gen_random_output() np.testing.assert_equal(__lowerCamelCase , __lowerCamelCase) self.assertGreater(np.abs(outa - outa).sum() , 0) @pytest.mark.parametrize("input_data" , [{}] ) def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ): _A : int = NestedDataStructure(UpperCamelCase__ ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output" , [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 
2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ] , ) def _UpperCAmelCase (UpperCamelCase__ : Any , UpperCamelCase__ : Dict ): _A : List[Any] = NestedDataStructure(UpperCamelCase__ ).flatten() assert output == expected_output def _UpperCAmelCase (): _A : int = A(x=1 , y="foobar" ) _A : Any = {"x": 1, "y": "foobar"} assert asdict(UpperCamelCase__ ) == expected_output _A : int = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]} _A : int = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(UpperCamelCase__ ) == expected_output with pytest.raises(UpperCamelCase__ ): asdict([1, A(x=10 , y="foo" )] ) def _UpperCAmelCase (UpperCamelCase__ : str ): return text.split() def _UpperCAmelCase (UpperCamelCase__ : int ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _UpperCAmelCase (): with Pool(2 ) as pool: _A : str = list(iflatmap_unordered(UpperCamelCase__ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(UpperCamelCase__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _A : Dict = list(iflatmap_unordered(UpperCamelCase__ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(UpperCamelCase__ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: _A : Dict = [] for yield_time, content in iflatmap_unordered( UpperCamelCase__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(UpperCamelCase__ ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(UpperCamelCase__ ) == 4
503
0
"""simple docstring""" import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class snake_case_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" @register_to_config def __init__( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a = False , ): """simple docstring""" super().__init__() A__ = nn.Embedding(__a , __a ) A__ = nn.Embedding(__a , __a ) A__ = False A__ = nn.Dropout(p=__a ) A__ = TaConfig( vocab_size=__a , d_model=__a , num_heads=__a , d_kv=__a , d_ff=__a , dropout_rate=__a , feed_forward_proj=__a , is_decoder=__a , is_encoder_decoder=__a , ) A__ = nn.ModuleList() for lyr_num in range(__a ): A__ = TaBlock(__a ) self.encoders.append(__a ) A__ = TaLayerNorm(__a ) A__ = nn.Dropout(p=__a ) def _UpperCAmelCase ( self , __a , __a ): """simple docstring""" A__ = self.token_embedder(__a ) A__ = encoder_input_tokens.shape[1] A__ = torch.arange(__a , device=encoder_input_tokens.device ) x += self.position_encoding(__a ) A__ = self.dropout_pre(__a ) # inverted the attention mask A__ = encoder_input_tokens.size() A__ = self.get_extended_attention_mask(__a , __a ) for lyr in self.encoders: A__ = lyr(__a , __a )[0] A__ = self.layer_norm(__a ) return self.dropout_post(__a ), encoder_inputs_mask
554
"""simple docstring""" import torch from transformers import AutoModel class snake_case_ ( torch.nn.Module ): """simple docstring""" def __init__( self , __a="sayef/fsner-bert-base-uncased" ): """simple docstring""" super(__a , self ).__init__() A__ = AutoModel.from_pretrained(__a , return_dict=__a ) A__ = torch.nn.CosineSimilarity(3 , 1E-08 ) A__ = torch.nn.Softmax(dim=1 ) def _UpperCAmelCase ( self , **__a ): """simple docstring""" return self.bert(**__a ).last_hidden_state def _UpperCAmelCase ( self , __a ): """simple docstring""" return token_embeddings.sum(2 , keepdim=__a ) def _UpperCAmelCase ( self , __a , __a , __a=1 ): """simple docstring""" return self.softmax(T * self.cos(__a , __a ) ) def _UpperCAmelCase ( self , __a , __a ): """simple docstring""" A__ = W_supports['sizes'].tolist() A__ = W_supports['start_token_id'].item() A__ = W_supports['end_token_id'].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] A__ = self.BERT(**__a ) A__ = self.BERT(**__a ) A__ = None A__ = None A__ = W_supports['input_ids'] == start_token_id A__ = W_supports['input_ids'] == end_token_id for i, size in enumerate(__a ): if i == 0: A__ = 0 else: A__ = support_sizes[i - 1] A__ = S[s : s + size][start_token_masks[s : s + size]] A__ = S[s : s + size][end_token_masks[s : s + size]] A__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) A__ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: A__ = torch.vstack((p_starts, p_start) ) A__ = torch.vstack((p_ends, p_end) ) else: A__ = p_start A__ = p_end return p_starts, p_ends
554
1
'''simple docstring''' from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def _UpperCamelCase ( ) -> List[Any]: lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = parser.parse_args_into_dataclasses()[0] lowerCamelCase_ = TensorFlowBenchmark(args=__UpperCamelCase ) try: lowerCamelCase_ = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowerCamelCase_ = 'Arg --no_{0} is no longer used, please use --no-{0} instead.' lowerCamelCase_ = ' '.join(str(__UpperCamelCase ).split(' ' )[:-1] ) lowerCamelCase_ = '' lowerCamelCase_ = eval(str(__UpperCamelCase ).split(' ' )[-1] ) lowerCamelCase_ = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: lowerCamelCase_ = full_error_msg + begin_error_msg + str(__UpperCamelCase ) raise ValueError(__UpperCamelCase ) benchmark.run() if __name__ == "__main__": main()
42
from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _UpperCAmelCase ( yaml.SafeLoader ): def _snake_case ( self : Dict , UpperCAmelCase : Union[str, Any]): SCREAMING_SNAKE_CASE_ :List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value] SCREAMING_SNAKE_CASE_ :Optional[Any] = [tuple(UpperCAmelCase) if isinstance(UpperCAmelCase , UpperCAmelCase) else key for key in keys] SCREAMING_SNAKE_CASE_ :List[Any] = Counter(UpperCAmelCase) SCREAMING_SNAKE_CASE_ :str = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}") def _snake_case ( self : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any]=False): SCREAMING_SNAKE_CASE_ :Optional[int] = super().construct_mapping(UpperCAmelCase , deep=UpperCAmelCase) self._check_no_duplicates_on_constructed_node(UpperCAmelCase) return mapping def lowercase ( a ): '''simple docstring''' SCREAMING_SNAKE_CASE_ :Any = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: SCREAMING_SNAKE_CASE_ :Union[str, Any] = full_content[1:].index("---" ) + 1 SCREAMING_SNAKE_CASE_ :List[Any] = "\n".join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(a ) class _UpperCAmelCase ( lowercase ): # class attributes lowerCamelCase_ : List[Any] = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def _snake_case ( cls : Optional[int] , UpperCAmelCase : Path): with open(UpperCAmelCase , encoding="utf-8") as readme_file: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[Any] = _split_yaml_from_readme(readme_file.read()) if yaml_string is not None: return cls.from_yaml_string(UpperCAmelCase) else: return cls() def _snake_case ( self : Dict , UpperCAmelCase : Path): if path.exists(): with open(UpperCAmelCase , encoding="utf-8") as readme_file: 
SCREAMING_SNAKE_CASE_ :Optional[Any] = readme_file.read() else: SCREAMING_SNAKE_CASE_ :List[str] = None SCREAMING_SNAKE_CASE_ :List[Any] = self._to_readme(UpperCAmelCase) with open(UpperCAmelCase , "w" , encoding="utf-8") as readme_file: readme_file.write(UpperCAmelCase) def _snake_case ( self : Union[str, Any] , UpperCAmelCase : Optional[str] = None): if readme_content is not None: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :List[str] = _split_yaml_from_readme(UpperCAmelCase) SCREAMING_SNAKE_CASE_ :int = "---\n" + self.to_yaml_string() + "---\n" + content else: SCREAMING_SNAKE_CASE_ :Any = "---\n" + self.to_yaml_string() + "---\n" return full_content @classmethod def _snake_case ( cls : List[str] , UpperCAmelCase : str): SCREAMING_SNAKE_CASE_ :Optional[Any] = yaml.load(UpperCAmelCase , Loader=_NoDuplicateSafeLoader) or {} # Convert the YAML keys to DatasetMetadata fields SCREAMING_SNAKE_CASE_ :Union[str, Any] = { (key.replace("-" , "_") if key.replace("-" , "_") in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**UpperCAmelCase) def _snake_case ( self : str): return yaml.safe_dump( { (key.replace("_" , "-") if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=UpperCAmelCase , allow_unicode=UpperCAmelCase , encoding="utf-8" , ).decode("utf-8") SCREAMING_SNAKE_CASE__ = { "image-classification": [], "translation": [], "image-segmentation": [], "fill-mask": [], "automatic-speech-recognition": [], "token-classification": [], "sentence-similarity": [], "audio-classification": [], "question-answering": [], "summarization": [], "zero-shot-classification": [], "table-to-text": [], "feature-extraction": [], "other": [], "multiple-choice": [], "text-classification": [], "text-to-image": [], "text2text-generation": [], "zero-shot-image-classification": [], "tabular-classification": [], "tabular-regression": [], "image-to-image": [], "tabular-to-text": [], 
"unconditional-image-generation": [], "text-retrieval": [], "text-to-speech": [], "object-detection": [], "audio-to-audio": [], "text-generation": [], "conversational": [], "table-question-answering": [], "visual-question-answering": [], "image-to-text": [], "reinforcement-learning": [], "voice-activity-detection": [], "time-series-forecasting": [], "document-question-answering": [], } if __name__ == "__main__": from argparse import ArgumentParser SCREAMING_SNAKE_CASE__ = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.") ap.add_argument("readme_filepath") SCREAMING_SNAKE_CASE__ = ap.parse_args() SCREAMING_SNAKE_CASE__ = Path(args.readme_filepath) SCREAMING_SNAKE_CASE__ = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
631
0
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase ( _a , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Any =LongformerTokenizer _SCREAMING_SNAKE_CASE : int =True _SCREAMING_SNAKE_CASE : Optional[int] =LongformerTokenizerFast _SCREAMING_SNAKE_CASE : Optional[Any] =True def a__ ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _A= [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _A= dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) _A= ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _A= {'unk_token': '<unk>'} _A= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _A= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase__ ) ) def a__ ( self , **lowerCAmelCase__ ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def a__ ( self , **lowerCAmelCase__ ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def a__ ( self , lowerCAmelCase__ ): _A= 'lower newer' _A= 'lower newer' return input_text, output_text def a__ ( self ): _A= self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A= 'lower newer' _A= ['l', 'o', 'w', 'er', 
'\u0120', 'n', 'e', 'w', 'er'] _A= tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) _A= tokens + [tokenizer.unk_token] _A= [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ ) def a__ ( self ): _A= self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowerCAmelCase__ ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=lowerCAmelCase__ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def a__ ( self ): _A= self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) _A= tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase__ ) _A= tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase__ ) _A= tokenizer.encode( 'sequence builders' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) _A= tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) _A= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ) _A= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def a__ ( self ): _A= self.get_tokenizer() _A= 'Encode this sequence.' 
_A= tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments _A= tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) _A= tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) _A= tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) _A= tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) _A= tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) _A= tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Testing spaces after special tokens _A= '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space _A= tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) _A= 'Encode <mask> sequence' _A= 'Encode <mask>sequence' _A= tokenizer.encode(lowerCAmelCase__ ) _A= encoded.index(lowerCAmelCase__ ) _A= tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) _A= tokenizer.encode(lowerCAmelCase__ ) _A= encoded.index(lowerCAmelCase__ ) _A= tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def a__ ( self ): pass def a__ ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): _A= self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _A= self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _A= 'A, <mask> AllenNLP sentence.' 
_A= tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ ) _A= tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) _A= tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _A= tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( lowerCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def a__ ( self ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): _A= self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) _A= json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) _A= json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowerCAmelCase__ ) self.assertEqual(post_processor_state['add_prefix_space'] , lowerCAmelCase__ ) self.assertEqual(post_processor_state['trim_offsets'] , lowerCAmelCase__ ) def a__ ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with 
self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): _A= 'hello' # `hello` is a token in the vocabulary of `pretrained_name` _A= f"{text_of_1_token} {text_of_1_token}" _A= self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) _A= tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) _A= self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) _A= tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) _A= self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) _A= tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) _A= self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) _A= tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) 
self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) _A= f" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) _A= self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) _A= tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) _A= self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) _A= tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) _A= self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) _A= tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + 
len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
716
from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class lowerCAmelCase : _SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3] _SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3] _SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3] _SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3] _SCREAMING_SNAKE_CASE : int _SCREAMING_SNAKE_CASE : int _SCREAMING_SNAKE_CASE : float _SCREAMING_SNAKE_CASE : float _SCREAMING_SNAKE_CASE : Tuple[int] def a__ ( self ): assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def a__ ( self ): return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def a__ ( self ): return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def a__ ( self ): _A= torch.arange(self.height * self.width ) _A= torch.stack( [ pixel_indices % self.width, torch.div(lowerCAmelCase__ , self.width , rounding_mode='trunc' ), ] , axis=1 , ) return coords @property def a__ ( self ): _A, *_A= self.shape _A= int(np.prod(lowerCAmelCase__ ) ) _A= self.get_image_coords() _A= torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) _A= self.get_camera_rays(lowerCAmelCase__ ) _A= rays.view(lowerCAmelCase__ , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def a__ ( self , lowerCAmelCase__ ): _A, *_A, _A= coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] _A= coords.view(lowerCAmelCase__ , -1 , 2 ) _A= self.resolution() _A= self.fov() _A= (flat.float() / (res - 1)) * 2 - 1 _A= fracs * torch.tan(fov / 2 ) _A= fracs.view(lowerCAmelCase__ , -1 , 2 ) _A= ( self.z.view(lowerCAmelCase__ , 1 , 3 ) + self.x.view(lowerCAmelCase__ , 1 , 3 ) * fracs[:, :, :1] + 
self.y.view(lowerCAmelCase__ , 1 , 3 ) * fracs[:, :, 1:] ) _A= directions / directions.norm(dim=-1 , keepdim=lowerCAmelCase__ ) _A= torch.stack( [ torch.broadcast_to(self.origin.view(lowerCAmelCase__ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(lowerCAmelCase__ , *lowerCAmelCase__ , 2 , 3 ) def a__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ): assert width * self.height == height * self.width, "The aspect ratio should not change." return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCAmelCase__ , height=lowerCAmelCase__ , x_fov=self.x_fov , y_fov=self.y_fov , ) def UpperCamelCase ( lowerCAmelCase_ ) -> DifferentiableProjectiveCamera: '''simple docstring''' _A= [] _A= [] _A= [] _A= [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): _A= np.array([np.sin(lowerCAmelCase_ ), np.cos(lowerCAmelCase_ ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) _A= -z * 4 _A= np.array([np.cos(lowerCAmelCase_ ), -np.sin(lowerCAmelCase_ ), 0.0] ) _A= np.cross(lowerCAmelCase_ , lowerCAmelCase_ ) origins.append(lowerCAmelCase_ ) xs.append(lowerCAmelCase_ ) ys.append(lowerCAmelCase_ ) zs.append(lowerCAmelCase_ ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) ).float() , width=lowerCAmelCase_ , height=lowerCAmelCase_ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCAmelCase_ )) , )
476
0
from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): return [ int(1_0_0_0 * (box[0] / width) ), int(1_0_0_0 * (box[1] / height) ), int(1_0_0_0 * (box[2] / width) ), int(1_0_0_0 * (box[3] / height) ), ] def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = to_pil_image(UpperCamelCase_ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pil_image.size SCREAMING_SNAKE_CASE_ = pytesseract.image_to_data(UpperCamelCase_ , lang=UpperCamelCase_ , output_type='''dict''' , config=UpperCamelCase_ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates SCREAMING_SNAKE_CASE_ = [idx for idx, word in enumerate(UpperCamelCase_ ) if not word.strip()] SCREAMING_SNAKE_CASE_ = [word for idx, word in enumerate(UpperCamelCase_ ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(UpperCamelCase_ ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(UpperCamelCase_ ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(UpperCamelCase_ ) 
if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(UpperCamelCase_ ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format SCREAMING_SNAKE_CASE_ = [] for x, y, w, h in zip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): SCREAMING_SNAKE_CASE_ = [x, y, x + w, y + h] actual_boxes.append(UpperCamelCase_ ) # finally, normalize the bounding boxes SCREAMING_SNAKE_CASE_ = [] for box in actual_boxes: normalized_boxes.append(normalize_box(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ), "Not as many words as there are bounding boxes" return words, normalized_boxes class __magic_name__ ( __UpperCAmelCase): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = ["pixel_values"] def __init__( self: List[Any] , _lowerCamelCase: bool = True , _lowerCamelCase: Dict[str, int] = None , _lowerCamelCase: PILImageResampling = PILImageResampling.BILINEAR , _lowerCamelCase: bool = True , _lowerCamelCase: float = 1 / 2_55 , _lowerCamelCase: bool = True , _lowerCamelCase: Union[float, Iterable[float]] = None , _lowerCamelCase: Union[float, Iterable[float]] = None , _lowerCamelCase: bool = True , _lowerCamelCase: Optional[str] = None , _lowerCamelCase: Optional[str] = "" , **_lowerCamelCase: int , ): super().__init__(**_lowerCamelCase ) SCREAMING_SNAKE_CASE_ = size if size is not None else {'''height''': 2_24, '''width''': 2_24} SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCamelCase ) SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = resample SCREAMING_SNAKE_CASE_ = do_rescale SCREAMING_SNAKE_CASE_ = rescale_value SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else IMAGENET_STANDARD_STD SCREAMING_SNAKE_CASE_ = apply_ocr SCREAMING_SNAKE_CASE_ = 
ocr_lang SCREAMING_SNAKE_CASE_ = tesseract_config def _A ( self: List[str] , _lowerCamelCase: np.ndarray , _lowerCamelCase: Dict[str, int] , _lowerCamelCase: PILImageResampling = PILImageResampling.BILINEAR , _lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase: List[str] , ): SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" ) SCREAMING_SNAKE_CASE_ = (size['''height'''], size['''width''']) return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def _A ( self: List[Any] , _lowerCamelCase: np.ndarray , _lowerCamelCase: Union[int, float] , _lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase: Tuple , ): return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def _A ( self: int , _lowerCamelCase: np.ndarray , _lowerCamelCase: Union[float, Iterable[float]] , _lowerCamelCase: Union[float, Iterable[float]] , _lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase: int , ): return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def _A ( self: Optional[Any] , _lowerCamelCase: ImageInput , _lowerCamelCase: bool = None , _lowerCamelCase: Dict[str, int] = None , _lowerCamelCase: Any=None , _lowerCamelCase: bool = None , _lowerCamelCase: float = None , _lowerCamelCase: bool = None , _lowerCamelCase: Union[float, Iterable[float]] = None , _lowerCamelCase: Union[float, Iterable[float]] = None , _lowerCamelCase: bool = None , _lowerCamelCase: Optional[str] = None , _lowerCamelCase: Optional[str] = None , _lowerCamelCase: Optional[Union[str, TensorType]] = None , _lowerCamelCase: ChannelDimension = ChannelDimension.FIRST , **_lowerCamelCase: 
int , ): SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ = size if size is not None else self.size SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCamelCase ) SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ = apply_ocr if apply_ocr is not None else self.apply_ocr SCREAMING_SNAKE_CASE_ = ocr_lang if ocr_lang is not None else self.ocr_lang SCREAMING_SNAKE_CASE_ = tesseract_config if tesseract_config is not None else self.tesseract_config SCREAMING_SNAKE_CASE_ = make_list_of_images(_lowerCamelCase ) if not valid_images(_lowerCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE_ = [to_numpy_array(_lowerCamelCase ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for image in images: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = apply_tesseract(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) words_batch.append(_lowerCamelCase ) boxes_batch.append(_lowerCamelCase ) if do_resize: SCREAMING_SNAKE_CASE_ = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE_ = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE_ = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images] SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images] SCREAMING_SNAKE_CASE_ = BatchFeature(data={'''pixel_values''': images} , tensor_type=_lowerCamelCase ) if apply_ocr: SCREAMING_SNAKE_CASE_ = words_batch SCREAMING_SNAKE_CASE_ = boxes_batch return data
234
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a_ ( UpperCamelCase_ ): A_ = botoa.client("iam" ) A_ = { "Version": "2012-10-17", "Statement": [ {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=UpperCamelCase_ , AssumeRolePolicyDocument=json.dumps(UpperCamelCase_ , indent=2 ) ) A_ = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "sagemaker:*", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "ecr:BatchCheckLayerAvailability", "ecr:GetAuthorizationToken", "cloudwatch:PutMetricData", "cloudwatch:GetMetricData", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:DescribeLogStreams", "logs:PutLogEvents", "logs:GetLogEvents", "s3:CreateBucket", 
"s3:ListBucket", "s3:GetBucketLocation", "s3:GetObject", "s3:PutObject", ], "Resource": "*", } ], } # attach policy to role iam_client.put_role_policy( RoleName=UpperCamelCase_ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(UpperCamelCase_ , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(f"role {role_name} already exists. Using existing one" ) def a_ ( UpperCamelCase_ ): A_ = botoa.client("iam" ) return iam_client.get_role(RoleName=UpperCamelCase_ )["Role"]["Arn"] def a_ ( ): A_ = _ask_options( "How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , UpperCamelCase_ , ) A_ = None if credentials_configuration == 0: A_ = _ask_field("Enter your AWS Profile name: [default] " , default="default" ) A_ = aws_profile else: print( "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with," "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" ) A_ = _ask_field("AWS Access Key ID: " ) A_ = aws_access_key_id A_ = _ask_field("AWS Secret Access Key: " ) A_ = aws_secret_access_key A_ = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" ) A_ = aws_region A_ = _ask_options( "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , UpperCamelCase_ , ) if role_management == 0: A_ = _ask_field("Enter your IAM role name: " ) else: A_ = "accelerate_sagemaker_execution_role" print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" ) _create_iam_role_for_sagemaker(UpperCamelCase_ ) A_ = _ask_field( "Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." 
, ) A_ = None if is_custom_docker_image: A_ = _ask_field("Enter your Docker image: " , lambda UpperCamelCase_ : str(UpperCamelCase_ ).lower() ) A_ = _ask_field( "Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , ) A_ = None if is_sagemaker_inputs_enabled: A_ = _ask_field( "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda UpperCamelCase_ : str(UpperCamelCase_ ).lower() , ) A_ = _ask_field( "Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , ) A_ = None if is_sagemaker_metrics_enabled: A_ = _ask_field( "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda UpperCamelCase_ : str(UpperCamelCase_ ).lower() , ) A_ = _ask_options( "What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , ) A_ = {} A_ = _ask_field( "Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , ) if use_dynamo: A_ = "dynamo_" A_ = _ask_options( "Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) A_ = _ask_field( "Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , ) if use_custom_options: A_ = _ask_options( "Which mode do you want to use?" , UpperCamelCase_ , lambda UpperCamelCase_ : TORCH_DYNAMO_MODES[int(UpperCamelCase_ )] , default="default" , ) A_ = _ask_field( "Do you want the fullgraph mode or it is ok to break model into several subgraphs? 
[yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , ) A_ = _ask_field( "Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="Please enter yes or no." , ) A_ = "Which EC2 instance type you want to use for your training?" if distributed_type != SageMakerDistributedType.NO: A_ = _ask_options( UpperCamelCase_ , UpperCamelCase_ , lambda UpperCamelCase_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(UpperCamelCase_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" A_ = _ask_field(UpperCamelCase_ , lambda UpperCamelCase_ : str(UpperCamelCase_ ).lower() , default="ml.p3.2xlarge" ) A_ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): A_ = _ask_field( "How many machines do you want use? [1]: " , UpperCamelCase_ , default=1 , ) A_ = _ask_options( "Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." ) return SageMakerConfig( image_uri=UpperCamelCase_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=UpperCamelCase_ , use_cpu=UpperCamelCase_ , dynamo_config=UpperCamelCase_ , eca_instance_type=UpperCamelCase_ , profile=UpperCamelCase_ , region=UpperCamelCase_ , iam_role_name=UpperCamelCase_ , mixed_precision=UpperCamelCase_ , num_machines=UpperCamelCase_ , sagemaker_inputs_file=UpperCamelCase_ , sagemaker_metrics_file=UpperCamelCase_ , )
452
0
"""simple docstring""" from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCAmelCase : def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=None , ): lowerCamelCase__ =parent lowerCamelCase__ =batch_size lowerCamelCase__ =image_size lowerCamelCase__ =patch_size lowerCamelCase__ =num_channels lowerCamelCase__ =is_training lowerCamelCase__ =use_labels lowerCamelCase__ =hidden_size lowerCamelCase__ =num_hidden_layers lowerCamelCase__ =num_attention_heads lowerCamelCase__ =intermediate_size lowerCamelCase__ =hidden_act lowerCamelCase__ =hidden_dropout_prob lowerCamelCase__ =attention_probs_dropout_prob lowerCamelCase__ =type_sequence_label_size lowerCamelCase__ =initializer_range lowerCamelCase__ =scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ =(image_size // patch_size) ** 2 lowerCamelCase__ =num_patches + 1 def _a ( self ): lowerCamelCase__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ =None if 
self.use_labels: lowerCamelCase__ =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ =self.get_config() return config, pixel_values, labels def _a ( self ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): lowerCamelCase__ =TFViTModel(config=_lowerCamelCase ) lowerCamelCase__ =model(_lowerCamelCase , training=_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. lowerCamelCase__ =self.image_size // 2 lowerCamelCase__ =pixel_values[:, :, :image_size, :image_size] lowerCamelCase__ =model(_lowerCamelCase , interpolate_pos_encoding=_lowerCamelCase , training=_lowerCamelCase ) lowerCamelCase__ =(image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): lowerCamelCase__ =self.type_sequence_label_size lowerCamelCase__ =TFViTForImageClassification(_lowerCamelCase ) lowerCamelCase__ =model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
lowerCamelCase__ =self.image_size // 2 lowerCamelCase__ =pixel_values[:, :, :image_size, :image_size] lowerCamelCase__ =model(_lowerCamelCase , interpolate_pos_encoding=_lowerCamelCase , training=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase__ =1 lowerCamelCase__ =TFViTForImageClassification(_lowerCamelCase ) lowerCamelCase__ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ =model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self ): lowerCamelCase__ =self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ =config_and_inputs lowerCamelCase__ ={"pixel_values": pixel_values} return config, inputs_dict @require_tf class __UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): A__ : str = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () A__ : Any = ( {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification} if is_tf_available() else {} ) A__ : str = False A__ : Optional[Any] = False A__ : List[Any] = False def _a ( self ): lowerCamelCase__ =TFViTModelTester(self ) lowerCamelCase__ =ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def _a ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def _a ( self ): pass @unittest.skip(reason="ViT does not use inputs_embeds" ) def _a ( self ): pass def _a ( self ): lowerCamelCase__ , lowerCamelCase__ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ =model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCamelCase__ =model.get_output_embeddings() self.assertTrue(x is None or 
isinstance(_lowerCamelCase , tf.keras.layers.Layer ) ) def _a ( self ): lowerCamelCase__ , lowerCamelCase__ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ =model_class(_lowerCamelCase ) lowerCamelCase__ =inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ =[*signature.parameters.keys()] lowerCamelCase__ =["pixel_values"] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self ): lowerCamelCase__ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self ): lowerCamelCase__ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self ): lowerCamelCase__ =TFViTModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(_lowerCamelCase ) def lowerCamelCase_ ( ) -> List[str]: '''simple docstring''' lowerCamelCase__ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class __UpperCAmelCase ( unittest.TestCase ): @cached_property def _a ( self ): return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def _a ( self ): lowerCamelCase__ =TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ) lowerCamelCase__ =self.default_image_processor lowerCamelCase__ =prepare_img() lowerCamelCase__ =image_processor(images=_lowerCamelCase , return_tensors="tf" ) # forward pass lowerCamelCase__ =model(**_lowerCamelCase ) # verify the logits lowerCamelCase__ =tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) lowerCamelCase__ =tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 )
701
"""simple docstring""" from collections.abc import Sequence from queue import Queue class __UpperCAmelCase : def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ): lowerCamelCase__ =start lowerCamelCase__ =end lowerCamelCase__ =val lowerCamelCase__ =(start + end) // 2 lowerCamelCase__ =left lowerCamelCase__ =right def __repr__( self ): return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})''' class __UpperCAmelCase : def __init__( self , _lowerCamelCase , _lowerCamelCase ): lowerCamelCase__ =collection lowerCamelCase__ =function if self.collection: lowerCamelCase__ =self._build_tree(0 , len(_lowerCamelCase ) - 1 ) def _a ( self , _lowerCamelCase , _lowerCamelCase ): self._update_tree(self.root , _lowerCamelCase , _lowerCamelCase ) def _a ( self , _lowerCamelCase , _lowerCamelCase ): return self._query_range(self.root , _lowerCamelCase , _lowerCamelCase ) def _a ( self , _lowerCamelCase , _lowerCamelCase ): if start == end: return SegmentTreeNode(_lowerCamelCase , _lowerCamelCase , self.collection[start] ) lowerCamelCase__ =(start + end) // 2 lowerCamelCase__ =self._build_tree(_lowerCamelCase , _lowerCamelCase ) lowerCamelCase__ =self._build_tree(mid + 1 , _lowerCamelCase ) return SegmentTreeNode(_lowerCamelCase , _lowerCamelCase , self.fn(left.val , right.val ) , _lowerCamelCase , _lowerCamelCase ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if node.start == i and node.end == i: lowerCamelCase__ =val return if i <= node.mid: self._update_tree(node.left , _lowerCamelCase , _lowerCamelCase ) else: self._update_tree(node.right , _lowerCamelCase , _lowerCamelCase ) lowerCamelCase__ =self.fn(node.left.val , node.right.val ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , 
_lowerCamelCase , _lowerCamelCase ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , _lowerCamelCase , node.mid ) , self._query_range(node.right , node.mid + 1 , _lowerCamelCase ) , ) else: # range in right child tree return self._query_range(node.right , _lowerCamelCase , _lowerCamelCase ) def _a ( self ): if self.root is not None: lowerCamelCase__ =Queue() queue.put(self.root ) while not queue.empty(): lowerCamelCase__ =queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('*' * 50) a =SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
132
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json', } class __lowercase ( __lowerCamelCase ): snake_case_ = """lxmert""" snake_case_ = {} def __init__( self : Optional[Any] ,A : Any=30_522 ,A : Dict=768 ,A : Optional[Any]=12 ,A : Optional[int]=9_500 ,A : List[Any]=1_600 ,A : int=400 ,A : List[Any]=3_072 ,A : List[Any]="gelu" ,A : str=0.1 ,A : Optional[Any]=0.1 ,A : Any=512 ,A : Tuple=2 ,A : Optional[int]=0.0_2 ,A : List[Any]=1e-12 ,A : List[Any]=9 ,A : Optional[int]=5 ,A : List[str]=5 ,A : List[Any]=2_048 ,A : Union[str, Any]=4 ,A : Tuple=6.6_7 ,A : Union[str, Any]=True ,A : Optional[Any]=True ,A : str=True ,A : Tuple=True ,A : Union[str, Any]=True ,A : int=True ,A : List[Any]=True ,**A : Tuple ,): '''simple docstring''' UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : List[Any] = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = max_position_embeddings UpperCAmelCase__ : List[str] = type_vocab_size UpperCAmelCase__ : Dict = initializer_range UpperCAmelCase__ : List[str] = layer_norm_eps UpperCAmelCase__ : Tuple = num_qa_labels UpperCAmelCase__ : Optional[Any] = num_object_labels UpperCAmelCase__ : Dict = num_attr_labels UpperCAmelCase__ : Optional[int] = l_layers UpperCAmelCase__ : Dict = x_layers UpperCAmelCase__ : int = r_layers UpperCAmelCase__ : Optional[int] = visual_feat_dim UpperCAmelCase__ : Optional[Any] = visual_pos_dim UpperCAmelCase__ : Any = visual_loss_normalizer UpperCAmelCase__ : Optional[Any] = task_matched UpperCAmelCase__ : str = task_mask_lm UpperCAmelCase__ : 
Union[str, Any] = task_obj_predict UpperCAmelCase__ : int = task_qa UpperCAmelCase__ : Union[str, Any] = visual_obj_loss UpperCAmelCase__ : Tuple = visual_attr_loss UpperCAmelCase__ : Tuple = visual_feat_loss UpperCAmelCase__ : int = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers} super().__init__(**A )
65
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __UpperCAmelCase = logging.get_logger(__name__) class __lowercase ( __lowerCamelCase ): snake_case_ = ["""input_features""", """attention_mask"""] def __init__( self : Any ,A : str=80 ,A : Optional[int]=16_000 ,A : int=0.0 ,A : str=10 ,A : Any=25 ,A : str="hamming_window" ,A : int=3_2_7_6_8.0 ,A : List[str]=0.9_7 ,A : Optional[int]=1.0 ,A : Optional[Any]=True ,A : Tuple=True ,A : Any=False ,**A : int ,): '''simple docstring''' super().__init__(feature_size=A ,sampling_rate=A ,padding_value=A ,**A ) UpperCAmelCase__ : str = feature_size UpperCAmelCase__ : int = sampling_rate UpperCAmelCase__ : int = padding_value UpperCAmelCase__ : Dict = hop_length UpperCAmelCase__ : int = win_length UpperCAmelCase__ : Dict = frame_signal_scale UpperCAmelCase__ : Dict = preemphasis_coeff UpperCAmelCase__ : str = mel_floor UpperCAmelCase__ : Any = normalize_means UpperCAmelCase__ : str = normalize_vars UpperCAmelCase__ : int = win_function UpperCAmelCase__ : List[Any] = return_attention_mask UpperCAmelCase__ : str = win_length * sampling_rate // 1_000 UpperCAmelCase__ : List[Any] = hop_length * sampling_rate // 1_000 UpperCAmelCase__ : int = optimal_fft_length(self.sample_size ) UpperCAmelCase__ : List[Any] = (self.n_fft // 2) + 1 def __lowercase ( self : Union[str, Any] ,A : np.array ): '''simple docstring''' if self.win_function == "hamming_window": UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=A ) else: UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ) UpperCAmelCase__ : Union[str, Any] = 
mel_filter_bank( num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,) UpperCAmelCase__ : Optional[Any] = spectrogram( one_waveform * self.frame_signal_scale ,window=A ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=A ,preemphasis=self.preemphasis_coeff ,mel_filters=A ,mel_floor=self.mel_floor ,log_mel="""log""" ,) return msfc_features.T def __lowercase ( self : str ,A : Any ,A : Optional[int] ,A : str ): '''simple docstring''' # make sure we normalize float32 arrays if self.normalize_means: UpperCAmelCase__ : Optional[Any] = x[:input_length].mean(axis=0 ) UpperCAmelCase__ : Any = np.subtract(A ,A ) if self.normalize_vars: UpperCAmelCase__ : str = x[:input_length].std(axis=0 ) UpperCAmelCase__ : Optional[int] = np.divide(A ,A ) if input_length < x.shape[0]: UpperCAmelCase__ : int = padding_value # make sure array is in float32 UpperCAmelCase__ : str = x.astype(np.floataa ) return x def __lowercase ( self : Union[str, Any] ,A : List[np.ndarray] ,A : Optional[np.ndarray] = None ): '''simple docstring''' UpperCAmelCase__ : Any = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(A ,A ,self.padding_value ) for x, n in zip(A ,A )] def __call__( self : Union[str, Any] ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : Union[bool, str, PaddingStrategy] = False ,A : Optional[int] = None ,A : bool = False ,A : Optional[int] = None ,A : Optional[bool] = None ,A : Optional[Union[str, TensorType]] = None ,A : Optional[int] = None ,**A : Tuple ,): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase__ : Optional[Any] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase__ : Any = is_batched_numpy or ( isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : List[str] = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(A ,np.ndarray ): UpperCAmelCase__ : Union[str, Any] = np.asarray(A ,dtype=np.floataa ) elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : Optional[Any] = [raw_speech] # extract fbank features UpperCAmelCase__ : Tuple = [self._extract_mfsc_features(A ) for one_waveform in raw_speech] # convert into correct format for padding UpperCAmelCase__ : str = BatchFeature({"""input_features""": features} ) UpperCAmelCase__ : Optional[Any] = self.pad( A ,padding=A ,max_length=A ,truncation=A ,pad_to_multiple_of=A ,return_attention_mask=A ,**A ,) # make sure list is in array format UpperCAmelCase__ : Tuple = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] ,A ): UpperCAmelCase__ : Union[str, Any] = [np.asarray(A ,dtype=np.floataa ) for feature in input_features] UpperCAmelCase__ : Dict = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: 
UpperCAmelCase__ : Union[str, Any] = ( np.array(A ,dtype=np.intaa ) if self._get_padding_strategies(A ,max_length=A ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) UpperCAmelCase__ : Any = self.normalize( padded_inputs["""input_features"""] ,attention_mask=A ) if return_tensors is not None: UpperCAmelCase__ : Union[str, Any] = padded_inputs.convert_to_tensors(A ) return padded_inputs
65
1
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time lowerCamelCase__ = Lock() def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(__lowerCAmelCase ) process_lock.release() # receive your right neighbor's value process_lock.acquire() _UpperCAmelCase : Tuple = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left _UpperCAmelCase : Dict = min(__lowerCAmelCase , __lowerCAmelCase ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(__lowerCAmelCase ) process_lock.release() # receive your left neighbor's value process_lock.acquire() _UpperCAmelCase : List[str] = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right _UpperCAmelCase : Optional[int] = max(__lowerCAmelCase , __lowerCAmelCase ) # after all swaps are performed, send the values back to main result_pipe[1].send(__lowerCAmelCase ) def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : List[Any] = [] _UpperCAmelCase : Union[str, Any] = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop _UpperCAmelCase : Optional[Any] = Pipe() _UpperCAmelCase : Dict = Pipe() process_array_.append( Process( target=__lowerCAmelCase , 
args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) _UpperCAmelCase : Optional[int] = temp_rs _UpperCAmelCase : Tuple = temp_rr for i in range(1 , len(__lowerCAmelCase ) - 1 ): _UpperCAmelCase : Optional[int] = Pipe() _UpperCAmelCase : int = Pipe() process_array_.append( Process( target=__lowerCAmelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) _UpperCAmelCase : List[Any] = temp_rs _UpperCAmelCase : int = temp_rr process_array_.append( Process( target=__lowerCAmelCase , args=( len(__lowerCAmelCase ) - 1, arr[len(__lowerCAmelCase ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(__lowerCAmelCase ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(__lowerCAmelCase ) ): _UpperCAmelCase : List[str] = result_pipe[p][0].recv() process_array_[p].join() return arr def __lowerCAmelCase (): _UpperCAmelCase : Any = list(range(10 , 0 , -1 ) ) print("Initial List" ) print(*__lowerCAmelCase ) _UpperCAmelCase : Tuple = odd_even_transposition(__lowerCAmelCase ) print("Sorted List\n" ) print(*__lowerCAmelCase ) if __name__ == "__main__": main()
40
'''simple docstring''' import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCamelCase__ = 16 lowerCamelCase__ = 32 def __lowerCAmelCase (__lowerCAmelCase ): return int(x / 2**20 ) class lowerCAmelCase__ : def __enter__( self : int ) ->Optional[Any]: '''simple docstring''' gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero _UpperCAmelCase : Tuple = torch.cuda.memory_allocated() return self def __exit__( self : Tuple , *lowerCamelCase__ : str ) ->int: '''simple docstring''' gc.collect() torch.cuda.empty_cache() _UpperCAmelCase : List[str] = torch.cuda.memory_allocated() _UpperCAmelCase : Tuple = torch.cuda.max_memory_allocated() _UpperCAmelCase : List[Any] = bamb(self.end - self.begin ) _UpperCAmelCase : int = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = "bert-base-cased" , __lowerCAmelCase = 320 , __lowerCAmelCase = 160 , ): _UpperCAmelCase : int = AutoTokenizer.from_pretrained(__lowerCAmelCase ) _UpperCAmelCase : Any = load_dataset( "glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} ) def tokenize_function(__lowerCAmelCase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _UpperCAmelCase : int = datasets.map( 
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__lowerCAmelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__lowerCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. _UpperCAmelCase : Any = DataLoader( tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) _UpperCAmelCase : List[str] = DataLoader( tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) return train_dataloader, eval_dataloader def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ): # Initialize accelerator _UpperCAmelCase : Union[str, Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase : List[Any] = config["lr"] _UpperCAmelCase : List[Any] = int(config["num_epochs"] ) _UpperCAmelCase : int = int(config["seed"] ) _UpperCAmelCase : Union[str, Any] = int(config["batch_size"] ) _UpperCAmelCase : Tuple = args.model_name_or_path set_seed(__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase ) # 
Instantiate optimizer _UpperCAmelCase : Dict = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase ) if accelerator.state.deepspeed_plugin is not None: _UpperCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: _UpperCAmelCase : Any = 1 _UpperCAmelCase : Optional[int] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _UpperCAmelCase : Tuple = get_linear_schedule_with_warmup( optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , ) else: _UpperCAmelCase : Optional[Any] = DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # We need to keep track of how many total steps we have iterated over _UpperCAmelCase : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly _UpperCAmelCase : str = 0 # Now we train the model _UpperCAmelCase : Optional[Any] = {} for epoch in range(__lowerCAmelCase , __lowerCAmelCase ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(__lowerCAmelCase ): _UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = outputs.loss _UpperCAmelCase : List[Any] = loss / gradient_accumulation_steps accelerator.backward(__lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) ) accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) ) accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) ) accelerator.print( "Total Peak Memory consumed during the train (max): {}".format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) _UpperCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def 
__lowerCAmelCase (): _UpperCAmelCase : Any = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=__lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , ) parser.add_argument( "--output_dir" , type=__lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--peak_memory_upper_bound" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , ) parser.add_argument( "--n_train" , type=__lowerCAmelCase , default=320 , help="Number of training examples to use." , ) parser.add_argument( "--n_val" , type=__lowerCAmelCase , default=160 , help="Number of validation examples to use." , ) parser.add_argument( "--num_epochs" , type=__lowerCAmelCase , default=1 , help="Number of train epochs." , ) _UpperCAmelCase : Tuple = parser.parse_args() _UpperCAmelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": main()
40
1
"""simple docstring""" from __future__ import annotations class A__: def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = data __SCREAMING_SNAKE_CASE = None def __repr__( self : Optional[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = self while temp: string_rep.append(f"""{temp.data}""" ) __SCREAMING_SNAKE_CASE = temp.next return "->".join(__SCREAMING_SNAKE_CASE ) def _a ( UpperCAmelCase__ ) -> List[Any]: if not elements_list: raise Exception('''The Elements List is empty''' ) __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = Node(elements_list[0] ) for i in range(1 , len(UpperCAmelCase__ ) ): __SCREAMING_SNAKE_CASE = Node(elements_list[i] ) __SCREAMING_SNAKE_CASE = current.next return head def _a ( UpperCAmelCase__ ) -> None: if head_node is not None and isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): print_reverse(head_node.next ) print(head_node.data ) def _a ( ) -> Union[str, Any]: from doctest import testmod testmod() __SCREAMING_SNAKE_CASE = make_linked_list([14, 52, 14, 12, 43] ) print('''Linked List:''' ) print(UpperCAmelCase__ ) print('''Elements in Reverse:''' ) print_reverse(UpperCAmelCase__ ) if __name__ == "__main__": main()
482
"""simple docstring""" import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class A__: def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : int=10 , __SCREAMING_SNAKE_CASE : List[Any]=[8, 16, 32, 64] , __SCREAMING_SNAKE_CASE : str=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[str]="relu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=["stage2", "stage3", "stage4"] , __SCREAMING_SNAKE_CASE : Optional[Any]=[2, 3, 4] , __SCREAMING_SNAKE_CASE : int=1 , ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = embeddings_size __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = scope __SCREAMING_SNAKE_CASE = 
len(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = out_features __SCREAMING_SNAKE_CASE = out_indices __SCREAMING_SNAKE_CASE = num_groups def _a ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _a ( self : Any ) -> str: """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = BitModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = BitForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]: 
"""simple docstring""" __SCREAMING_SNAKE_CASE = BitBackbone(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = BitBackbone(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : int ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () lowerCAmelCase = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = 
BitModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> int: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : Any ) -> Optional[int]: """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : int ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Optional[int] ) -> Dict: """simple docstring""" pass def _a ( self : Optional[int] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _a ( self : int ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__SCREAMING_SNAKE_CASE ) def _a ( self : int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(config=__SCREAMING_SNAKE_CASE ) for name, module in model.named_modules(): if isinstance(__SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def _a ( self : int ) -> Dict: """simple docstring""" def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ): __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __SCREAMING_SNAKE_CASE = layer_type __SCREAMING_SNAKE_CASE = True 
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : Optional[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = BitModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def _a ( ) -> List[Any]: __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class A__( unittest.TestCase ): @cached_property def _a ( self : Dict ) -> str: """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE ) # verify the logits __SCREAMING_SNAKE_CASE = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) 
__SCREAMING_SNAKE_CASE = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @require_torch class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = (BitBackbone,) if is_torch_available() else () lowerCAmelCase = BitConfig lowerCAmelCase = False def _a ( self : Dict ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = BitModelTester(self )
482
1
"""simple docstring""" import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class _lowercase ( _UpperCAmelCase ): '''simple docstring''' _A = 0 _A = False _A = 3.0 class _lowercase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self )-> Union[str, Any]: self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} ) self.assertDictEqual(MockClass(a=2 , b=lowercase__ ).to_kwargs() , {"a": 2, "b": True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} ) @require_cuda def lowerCAmelCase__ ( self )-> int: UpperCAmelCase__ : Optional[int] = GradScalerKwargs(init_scale=10_24 , growth_factor=2 ) AcceleratorState._reset_state() UpperCAmelCase__ : List[Any] = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) UpperCAmelCase__ : List[str] = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 20_00 ) self.assertEqual(scaler._enabled , lowercase__ ) @require_multi_gpu def lowerCAmelCase__ ( self )-> List[Any]: UpperCAmelCase__ : int = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] execute_subprocess_async(lowercase__ , env=os.environ.copy() ) if __name__ == "__main__": A__ : Union[str, Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) A__ : Dict = Accelerator(kwargs_handlers=[ddp_scaler]) A__ : Optional[int] = 
torch.nn.Linear(100, 200) A__ : int = accelerator.prepare(model) # Check the values changed in kwargs A__ : Dict = """""" A__ : Tuple = model.bucket_bytes_cap // (1_024 * 1_024) if observed_bucket_cap_map != 15: error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
702
"""simple docstring""" def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ): '''simple docstring''' _validate_point(lowerCAmelCase ) _validate_point(lowerCAmelCase ) if len(lowerCAmelCase ) != len(lowerCAmelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) ) def a__ ( lowerCAmelCase : list[float] ): '''simple docstring''' if point: if isinstance(lowerCAmelCase , lowerCAmelCase ): for item in point: if not isinstance(lowerCAmelCase , (int, float) ): UpperCAmelCase__ : Tuple = ( "Expected a list of numbers as input, found " F"{type(lowerCAmelCase ).__name__}" ) raise TypeError(lowerCAmelCase ) else: UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}" raise TypeError(lowerCAmelCase ) else: raise ValueError("Missing an input" ) def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ): '''simple docstring''' _validate_point(lowerCAmelCase ) _validate_point(lowerCAmelCase ) if len(lowerCAmelCase ) != len(lowerCAmelCase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
660
0
"""simple docstring""" from __future__ import annotations def snake_case ( A__ ): UpperCAmelCase_ : Optional[int] = str(A_ ) return n == n[::-1] def snake_case ( A__ = 1_00_00_00 ): UpperCAmelCase_ : List[str] = 0 for i in range(1 ,A_ ): if is_palindrome(A_ ) and is_palindrome(bin(A_ ).split("b" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
95
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _a ( unittest.TestCase): """simple docstring""" def __init__( self: Dict , __lowerCamelCase: Any , __lowerCamelCase: Optional[int]=7 , __lowerCamelCase: Any=3 , __lowerCamelCase: List[str]=18 , __lowerCamelCase: List[Any]=30 , __lowerCamelCase: Tuple=400 , __lowerCamelCase: List[str]=True , __lowerCamelCase: Any=None , __lowerCamelCase: int=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=True , __lowerCamelCase: List[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase: Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase: int=False , ): '''simple docstring''' UpperCamelCase__: Optional[int] = size if size is not None else {"height": 20, "width": 20} UpperCamelCase__: List[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18} UpperCamelCase__: Optional[int] = parent UpperCamelCase__: int = batch_size UpperCamelCase__: int = num_channels UpperCamelCase__: str = image_size UpperCamelCase__: Any = min_resolution UpperCamelCase__: Union[str, Any] = max_resolution UpperCamelCase__: Optional[Any] = do_resize UpperCamelCase__: Any = size UpperCamelCase__: str = do_center_crop UpperCamelCase__: Any = crop_size UpperCamelCase__: Any = do_normalize UpperCamelCase__: int = image_mean UpperCamelCase__: Tuple = image_std UpperCamelCase__: int = do_reduce_labels def UpperCAmelCase_ ( self: Tuple ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": 
self.image_std, "do_reduce_labels": self.do_reduce_labels, } def lowerCAmelCase_ ( ): UpperCamelCase__: Dict = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test") UpperCamelCase__: Optional[Any] = Image.open(dataset[0]["file"]) UpperCamelCase__: str = Image.open(dataset[1]["file"]) return image, map def lowerCAmelCase_ ( ): UpperCamelCase__: Dict = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test") UpperCamelCase__: int = Image.open(ds[0]["file"]) UpperCamelCase__: int = Image.open(ds[1]["file"]) UpperCamelCase__: List[str] = Image.open(ds[2]["file"]) UpperCamelCase__: List[Any] = Image.open(ds[3]["file"]) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class _a ( UpperCamelCase__ , unittest.TestCase): """simple docstring""" UpperCamelCase__ = BeitImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self: Optional[int] ): '''simple docstring''' UpperCamelCase__: str = BeitImageProcessingTester(self ) @property def UpperCAmelCase_ ( self: Any ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self: Dict ): '''simple docstring''' UpperCamelCase__: int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_center_crop" ) ) self.assertTrue(hasattr(__lowerCamelCase , "center_crop" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) def UpperCAmelCase_ ( self: Optional[int] ): '''simple docstring''' UpperCamelCase__: Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 20, "width": 20} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) 
self.assertEqual(image_processor.do_reduce_labels , __lowerCamelCase ) UpperCamelCase__: int = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__lowerCamelCase ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) self.assertEqual(image_processor.do_reduce_labels , __lowerCamelCase ) def UpperCAmelCase_ ( self: Tuple ): '''simple docstring''' pass def UpperCAmelCase_ ( self: Dict ): '''simple docstring''' UpperCamelCase__: Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input UpperCamelCase__: List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase__: str = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def UpperCAmelCase_ ( self: Dict ): '''simple docstring''' UpperCamelCase__: Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__: List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input UpperCamelCase__: Union[str, Any] = 
image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase__: Dict = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def UpperCAmelCase_ ( self: Union[str, Any] ): '''simple docstring''' UpperCamelCase__: int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input UpperCamelCase__: Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCamelCase__: str = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def UpperCAmelCase_ ( self: Tuple ): '''simple docstring''' UpperCamelCase__: Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) UpperCamelCase__: 
str = [] for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input UpperCamelCase__: Dict = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( 1, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 255 ) # Test batched UpperCamelCase__: Any = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 255 ) # Test not batched input (PIL images) UpperCamelCase__ , UpperCamelCase__: str = prepare_semantic_single_inputs() UpperCamelCase__: Any = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( 1, 
self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 255 ) # Test batched input (PIL images) UpperCamelCase__ , UpperCamelCase__: List[str] = prepare_semantic_batch_inputs() UpperCamelCase__: Optional[int] = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( 2, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 255 ) def UpperCAmelCase_ ( self: Any ): '''simple docstring''' UpperCamelCase__: Dict = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 UpperCamelCase__ , UpperCamelCase__: Any = prepare_semantic_single_inputs() UpperCamelCase__: int = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 150 ) UpperCamelCase__: List[Any] = True UpperCamelCase__: List[str] = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 255 )
380
0
__magic_name__ : List[Any] = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } def lowerCAmelCase ( snake_case__ : dict , snake_case__ : List[str] , snake_case__ : Dict )-> list[str]: A_ = set() # keep track of all the paths to be checked A_ = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue A_ = queue.pop(0 ) # get the last node from the path A_ = path[-1] if node not in explored: A_ = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: A_ = list(snake_case__ ) new_path.append(snake_case__ ) queue.append(snake_case__ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(snake_case__ ) # in case there's no path between the 2 nodes return [] def lowerCAmelCase ( snake_case__ : dict , snake_case__ : Optional[int] , snake_case__ : Optional[int] )-> int: if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 A_ = [start] A_ = set(snake_case__ ) # Keep tab on distances from `start` node. A_ = {start: 0, target: -1} while queue: A_ = queue.pop(0 ) if node == target: A_ = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(snake_case__ ) queue.append(snake_case__ ) A_ = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
608
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class lowerCamelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self , __UpperCamelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ): A_ = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def lowercase_ ( self ): A_ = "sshleifer/tiny-gpt2" A_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCamelCase , multi_process=__UpperCamelCase , ) A_ = TensorFlowBenchmark(__UpperCamelCase ) A_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ): A_ = "sgugger/tiny-distilbert-classification" A_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , only_pretrain_model=__UpperCamelCase , ) A_ = TensorFlowBenchmark(__UpperCamelCase ) A_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ): A_ = "sshleifer/tiny-gpt2" A_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , ) A_ = TensorFlowBenchmark(__UpperCamelCase ) A_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) 
self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ): A_ = "sshleifer/tiny-gpt2" A_ = AutoConfig.from_pretrained(__UpperCamelCase ) A_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCamelCase , multi_process=__UpperCamelCase , ) A_ = TensorFlowBenchmark(__UpperCamelCase , [config] ) A_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ): A_ = "sshleifer/tiny-gpt2" A_ = AutoConfig.from_pretrained(__UpperCamelCase ) A_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , ) A_ = TensorFlowBenchmark(__UpperCamelCase , [config] ) A_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ): A_ = "sshleifer/tiny-gpt2" A_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , ) A_ = TensorFlowBenchmark(__UpperCamelCase ) A_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowercase_ ( self ): A_ = "sshleifer/tiny-gpt2" A_ = AutoConfig.from_pretrained(__UpperCamelCase ) A_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , ) A_ = TensorFlowBenchmark(__UpperCamelCase , [config] ) A_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) 
self.check_results_dict_not_empty(results.memory_train_result ) def lowercase_ ( self ): A_ = "patrickvonplaten/t5-tiny-random" A_ = AutoConfig.from_pretrained(__UpperCamelCase ) A_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , ) A_ = TensorFlowBenchmark(__UpperCamelCase , configs=[config] ) A_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." ) def lowercase_ ( self ): A_ = "sshleifer/tiny-gpt2" A_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__UpperCamelCase , multi_process=__UpperCamelCase , ) A_ = TensorFlowBenchmark(__UpperCamelCase ) A_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase_ ( self ): A_ = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: A_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__UpperCamelCase , save_to_csv=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCamelCase , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(__UpperCamelCase , "inf_mem.csv" ) , env_info_csv_file=os.path.join(__UpperCamelCase , "env.csv" ) , multi_process=__UpperCamelCase , ) A_ = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase , "inf_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase , "inf_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase , "env.csv" ) ).exists() ) def lowercase_ ( self 
): A_ = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase , "sequential" ) ) self.assertTrue(hasattr(__UpperCamelCase , "cumulative" ) ) self.assertTrue(hasattr(__UpperCamelCase , "current" ) ) self.assertTrue(hasattr(__UpperCamelCase , "total" ) ) with tempfile.TemporaryDirectory() as tmp_dir: A_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCamelCase , "log.txt" ) , log_print=__UpperCamelCase , trace_memory_line_by_line=__UpperCamelCase , eager_mode=__UpperCamelCase , multi_process=__UpperCamelCase , ) A_ = TensorFlowBenchmark(__UpperCamelCase ) A_ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase , "log.txt" ) ).exists() )
608
1
def _a ( __UpperCamelCase : int ): assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ), f'''The input value of [n={number}] is not an integer''' if number == 1: return 2 elif number < 1: lowerCAmelCase__ : List[Any] = f'''The input value of [n={number}] has to be > 0''' raise ValueError(lowerCamelCase_ ) else: lowerCAmelCase__ : str = sylvester(number - 1 ) lowerCAmelCase__ : Optional[int] = num - 1 lowerCAmelCase__ : Optional[Any] = num return lower * upper + 1 if __name__ == "__main__": print(f"""The 8th number in Sylvester\'s sequence: {sylvester(8)}""")
233
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = "▁" UpperCamelCase_ = {"vocab_file": "spiece.model"} UpperCamelCase_ = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"} } UpperCamelCase_ = { "google/pegasus-xsum": 5_12, } UpperCamelCase_ = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( snake_case ): lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ = ['input_ids', 'attention_mask'] def __init__( self : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : int="<pad>" , snake_case_ : Any="</s>" , snake_case_ : List[Any]="<unk>" , snake_case_ : Optional[Any]="<mask_2>" , snake_case_ : Union[str, Any]="<mask_1>" , snake_case_ : Any=None , snake_case_ : str=103 , snake_case_ : Optional[Dict[str, Any]] = None , **snake_case_ : Union[str, Any] , ): """simple docstring""" A : str = offset if additional_special_tokens is not None: if not isinstance(snake_case_ , snake_case_ ): raise TypeError( f"""additional_special_tokens should be of type {type(snake_case_ )}, but is""" f""" {type(snake_case_ )}""" ) A : int = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(snake_case_ ) , self.offset - 1 ) ] if len(set(snake_case_ ) ) != len(snake_case_ ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""" ) A : Union[str, Any] = additional_special_tokens_extended else: A : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )] A : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=snake_case_ , unk_token=snake_case_ , mask_token=snake_case_ , pad_token=snake_case_ , mask_token_sent=snake_case_ , offset=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , ) A : Dict = mask_token_sent A : Optional[int] = vocab_file A : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case_ ) # add special tokens to encoder dict A : Dict[int, str] = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) A : Dict[str, int] = {v: k for k, v in self.encoder.items()} @property def _UpperCAmelCase ( self : str ): """simple docstring""" return len(self.sp_model ) + self.offset def _UpperCAmelCase ( self : str ): """simple docstring""" A : Dict = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ): """simple docstring""" A : Optional[Any] = self.__dict__.copy() A : Union[str, Any] = None return state def __setstate__( self : str , snake_case_ : Tuple ): """simple docstring""" A : str = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A : Union[str, Any] = {} A : Optional[Any] = 
spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCAmelCase ( self : Tuple , snake_case_ : str ): """simple docstring""" return self.sp_model.encode(snake_case_ , out_type=snake_case_ ) def _UpperCAmelCase ( self : Tuple , snake_case_ : str ): """simple docstring""" if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] A : int = self.sp_model.piece_to_id(snake_case_ ) return sp_id + self.offset def _UpperCAmelCase ( self : Optional[int] , snake_case_ : int ): """simple docstring""" if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: A : Any = self.sp_model.IdToPiece(index - self.offset ) return token def _UpperCAmelCase ( self : Dict , snake_case_ : Dict ): """simple docstring""" A : List[Any] = [] A : Union[str, Any] = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(snake_case_ ) + token A : Any = [] else: current_sub_tokens.append(snake_case_ ) out_string += self.sp_model.decode(snake_case_ ) return out_string.strip() def _UpperCAmelCase ( self : str , snake_case_ : Any=False ): """simple docstring""" return 1 def _UpperCAmelCase ( self : int , snake_case_ : Union[str, Any] ): """simple docstring""" A : str = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def _UpperCAmelCase ( self : int , snake_case_ : List , snake_case_ : Optional[List] = None , snake_case_ : bool = False ): """simple docstring""" if already_has_special_tokens: return self._special_token_mask(snake_case_ ) elif token_ids_a is None: return self._special_token_mask(snake_case_ ) + [1] else: return 
self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _UpperCAmelCase ( self : List[Any] , snake_case_ : Any , snake_case_ : Tuple=None ): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self : Tuple , snake_case_ : str , snake_case_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(snake_case_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A : int = os.path.join( snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case_ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case_ , '''wb''' ) as fi: A : Any = self.sp_model.serialized_model_proto() fi.write(snake_case_ ) return (out_vocab_file,)
256
0
"""Data2Vec Vision model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """Configuration class for Data2Vec Vision models (``model_type="data2vec-vision"``).

    Stores the hyper-parameters of the vision transformer backbone plus the
    decode/auxiliary head attributes used for semantic segmentation.  All
    parameters are optional and default to the base-model configuration.
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2Vec Vision models."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported ONNX graph.
        return 1e-4
500
"""simple docstring""" SCREAMING_SNAKE_CASE_ : str = '0.21.0' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
500
1
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (deterministic trial division)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the prime numbers in increasing order, indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below ``n`` (Project Euler #10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
550
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class A__ : """simple docstring""" def __init__( self , __snake_case , ): snake_case = parent snake_case = 1_3 snake_case = 7 snake_case = 3_0 snake_case = self.seq_length + self.mem_len snake_case = 1_5 snake_case = True snake_case = True snake_case = 9_9 snake_case = [1_0, 5_0, 8_0] snake_case = 3_2 snake_case = 3_2 snake_case = 4 snake_case = 8 snake_case = 1_2_8 snake_case = 2 snake_case = 2 snake_case = None snake_case = 1 snake_case = 0 snake_case = 3 snake_case = self.vocab_size - 1 snake_case = 0.01 def a_ ( self ): snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def a_ ( self ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def a_ ( self , __snake_case , __snake_case , __snake_case , 
__snake_case ): snake_case = TFTransfoXLModel(__snake_case ) snake_case , snake_case = model(__snake_case ).to_tuple() snake_case = {'''input_ids''': input_ids_a, '''mems''': mems_a} snake_case , snake_case = model(__snake_case ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ): snake_case = TFTransfoXLLMHeadModel(__snake_case ) snake_case , snake_case = model(__snake_case ).to_tuple() snake_case = {'''input_ids''': input_ids_a, '''labels''': lm_labels} snake_case , snake_case = model(__snake_case ).to_tuple() snake_case , snake_case = model([input_ids_a, mems_a] ).to_tuple() snake_case = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} snake_case , snake_case = model(__snake_case ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ): snake_case = TFTransfoXLForSequenceClassification(__snake_case ) snake_case = model(__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) 
def a_ ( self ): snake_case = self.prepare_config_and_inputs() ((snake_case) , (snake_case) , (snake_case) , (snake_case)) = config_and_inputs snake_case = {'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class A__ ( snake_case__ , snake_case__ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) __magic_name__ = () if is_tf_available() else () __magic_name__ = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def a_ ( self ): snake_case = TFTransfoXLModelTester(self ) snake_case = ConfigTester(self , config_class=__snake_case , d_embed=3_7 ) def a_ ( self ): self.config_tester.run_common_tests() def a_ ( self ): self.model_tester.set_seed() snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*__snake_case ) def a_ ( self ): self.model_tester.set_seed() snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*__snake_case ) def a_ ( self ): snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__snake_case ) def a_ ( self ): snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: snake_case = model_class(__snake_case ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: snake_case = model.get_output_embeddings() assert isinstance(__snake_case , tf.keras.layers.Layer ) snake_case = model.get_bias() assert name is None else: snake_case = model.get_output_embeddings() assert x is None snake_case = model.get_bias() assert name is None def a_ ( self ): # TODO JP: Make TransfoXL XLA compliant pass @slow def a_ ( self ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = TFTransfoXLModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def a_ ( self ): pass @require_tf class A__ ( unittest.TestCase ): """simple docstring""" @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def a_ ( self ): snake_case = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off snake_case = 
tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off snake_case = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. 
The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> snake_case = model.generate(__snake_case , max_length=2_0_0 , do_sample=__snake_case ) self.assertListEqual(output_ids[0].numpy().tolist() , __snake_case )
550
1
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """Check ``is_small_dataset`` against the configured ``IN_MEMORY_MAX_SIZE``.

    A dataset counts as "small" only when both a dataset size and a positive
    in-memory limit are set and the size is strictly below the limit.
    """
    if input_in_memory_max_size != "default":
        # Patch the module-level limit; "default" leaves the shipped value in place.
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    # Sanity-check that the patch (or the default of 0) took effect.
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # Expected: small only when both values are truthy and size < limit.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
716
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe, rounds=10):
    """Worker for one element of an odd-even transposition sort.

    On even-numbered phases it compare-exchanges with the right neighbour,
    on odd-numbered phases with the left; after ``rounds`` phases the held
    value is final and is sent back on ``result_pipe``.

    ``rounds`` defaults to 10 for backward compatibility with the original
    hard-coded phase count; the driver passes ``len(arr)`` explicitly.
    """
    global process_lock

    # After n phases of odd-even transposition over n elements the values
    # are guaranteed sorted, so no early-exit check is needed.
    for i in range(rounds):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            neighbour = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, neighbour)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            neighbour = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, neighbour)
    # after all swaps are performed, send the value back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort ``arr`` ascending in place (one process per element) and return it.

    Adjacent processes are connected by pipes; each runs ``len(arr)`` phases
    of compare-exchange, which is sufficient to fully sort the list.
    """
    # Guard: 0 or 1 element needs no processes (and avoids wiring a single
    # worker to itself, which would deadlock).
    if len(arr) <= 1:
        return arr

    process_array_ = []
    # one result pipe per element for collecting the final values
    result_pipe = [Pipe() for _ in arr]

    # the first and last process only have one neighbor, so they are made
    # outside of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0], len(arr)),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i], len(arr)),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
                len(arr),
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values back into the list
    for p in range(len(process_array_)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Demo: sort the reversed list [10..1] and print it before and after."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
205
0
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    """Configuration class for UperNet semantic-segmentation models.

    Wraps a backbone configuration (defaulting to ResNet) together with the
    decode-head and auxiliary-head hyper-parameters.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Re-hydrate a serialized backbone config via its registered class.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
55
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''image_processor''', '''tokenizer'''] _lowerCamelCase = '''BlipImageProcessor''' _lowerCamelCase = '''AutoTokenizer''' def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]: '''simple docstring''' super().__init__(_lowercase , _lowercase ) # add QFormer tokenizer snake_case_ : List[str] = qformer_tokenizer def __call__( self , _lowercase = None , _lowercase = None , _lowercase = True , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = 0 , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = True , _lowercase = None , **_lowercase , ) -> BatchFeature: '''simple docstring''' if images is None and text is None: raise ValueError("""You have to specify at least images or text.""" ) snake_case_ : Optional[Any] = BatchFeature() if text is not None: snake_case_ : List[str] = self.tokenizer( text=_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , ) encoding.update(_lowercase ) snake_case_ : Union[str, Any] = self.qformer_tokenizer( text=_lowercase , 
add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , stride=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , return_overflowing_tokens=_lowercase , return_special_tokens_mask=_lowercase , return_offsets_mapping=_lowercase , return_token_type_ids=_lowercase , return_length=_lowercase , verbose=_lowercase , return_tensors=_lowercase , **_lowercase , ) snake_case_ : List[str] = qformer_text_encoding.pop("""input_ids""" ) snake_case_ : Union[str, Any] = qformer_text_encoding.pop("""attention_mask""" ) if images is not None: snake_case_ : Tuple = self.image_processor(_lowercase , return_tensors=_lowercase ) encoding.update(_lowercase ) return encoding def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> List[Any]: '''simple docstring''' return self.tokenizer.batch_decode(*_lowercase , **_lowercase ) def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Dict: '''simple docstring''' return self.tokenizer.decode(*_lowercase , **_lowercase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ : str = self.tokenizer.model_input_names snake_case_ : List[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def UpperCAmelCase__ ( self , _lowercase , **_lowercase ) -> Optional[int]: '''simple docstring''' if os.path.isfile(_lowercase ): raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' ) os.makedirs(_lowercase , exist_ok=_lowercase ) snake_case_ : int = os.path.join(_lowercase , """qformer_tokenizer""" ) self.qformer_tokenizer.save_pretrained(_lowercase ) return super().save_pretrained(_lowercase , **_lowercase ) @classmethod def UpperCAmelCase__ ( cls , _lowercase , **_lowercase ) -> int: '''simple docstring''' snake_case_ : List[str] = 
AutoTokenizer.from_pretrained(_lowercase , subfolder="""qformer_tokenizer""" ) snake_case_ : Union[str, Any] = cls._get_arguments_from_pretrained(_lowercase , **_lowercase ) args.append(_lowercase ) return cls(*_lowercase )
58
0
import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowerCAmelCase_ ( __a , unittest.TestCase ): __UpperCAmelCase = VideoToVideoSDPipeline __UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'} __UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'} __UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'latents'} __UpperCAmelCase = False # No `output_type`. 
__UpperCAmelCase = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'return_dict', 'callback', 'callback_steps', ] ) def __snake_case ( self : str ): '''simple docstring''' torch.manual_seed(0 ) snake_case : int =UNetaDConditionModel( block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D'''), up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D'''), cross_attention_dim=32, attention_head_dim=4, ) snake_case : Optional[Any] =DDIMScheduler( beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=a_, set_alpha_to_one=a_, ) torch.manual_seed(0 ) snake_case : Any =AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) snake_case : Optional[Any] =CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='''gelu''', projection_dim=512, ) snake_case : Optional[Any] =CLIPTextModel(a_ ) snake_case : Union[str, Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case : Dict ={ """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def __snake_case ( self : Dict, _snake_case : Tuple, _snake_case : List[Any]=0 ): '''simple docstring''' snake_case : List[str] =floats_tensor((1, 3, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ ) if str(a_ ).startswith('''mps''' ): snake_case : Dict =torch.manual_seed(a_ ) else: snake_case : List[Any] 
=torch.Generator(device=a_ ).manual_seed(a_ ) snake_case : int ={ """prompt""": """A painting of a squirrel eating a burger""", """video""": video, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def __snake_case ( self : str ): '''simple docstring''' snake_case : Optional[Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator snake_case : List[Any] =self.get_dummy_components() snake_case : int =VideoToVideoSDPipeline(**a_ ) snake_case : int =sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) snake_case : Optional[Any] =self.get_dummy_inputs(a_ ) snake_case : Tuple ="""np""" snake_case : Tuple =sd_pipe(**a_ ).frames snake_case : Any =frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) snake_case : int =np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', ) def __snake_case ( self : Tuple ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a_, expected_max_diff=5E-3 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def __snake_case ( self : Any ): '''simple docstring''' pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def __snake_case ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def __snake_case ( self : List[Any] ): '''simple docstring''' pass def __snake_case ( self : int ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class lowerCAmelCase_ ( unittest.TestCase ): def __snake_case ( self : List[Any] ): '''simple docstring''' 
snake_case : Union[str, Any] =VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''', torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames snake_case : List[Any] =torch.Generator(device='''cpu''' ).manual_seed(0 ) snake_case : Any =torch.randn((1, 10, 3, 1_024, 576), generator=a_ ) snake_case : Union[str, Any] =video.to('''cuda''' ) snake_case : List[Any] ="""Spiderman is surfing""" snake_case : Any =pipe(a_, video=a_, generator=a_, num_inference_steps=3, output_type='''pt''' ).frames snake_case : int =np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
706
'''simple docstring''' def _a ( lowerCamelCase_ ): if not head: return True # split the list to two parts snake_case , snake_case : Tuple =head.next, head while fast and fast.next: snake_case : Union[str, Any] =fast.next.next snake_case : Optional[Any] =slow.next snake_case : Dict =slow.next snake_case : Union[str, Any] =None # Don't forget here! But forget still works! # reverse the second part snake_case : Any =None while second: snake_case : str =second.next snake_case : Tuple =node snake_case : Union[str, Any] =second snake_case : List[str] =nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False snake_case : str =node.next snake_case : List[Any] =head.next return True def _a ( lowerCamelCase_ ): if not head or not head.next: return True # 1. Get the midpoint (slow) snake_case : Tuple =head while fast and fast.next: snake_case , snake_case : int =fast.next.next, slow.next # 2. Push the second half into the stack snake_case : Optional[Any] =[slow.val] while slow.next: snake_case : str =slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False snake_case : Any =cur.next return True def _a ( lowerCamelCase_ ): if not head or not head.next: return True snake_case : int ={} snake_case : List[str] =0 while head: if head.val in d: d[head.val].append(lowerCamelCase_ ) else: snake_case : Optional[Any] =[pos] snake_case : str =head.next pos += 1 snake_case : Union[str, Any] =pos - 1 snake_case : Optional[int] =0 for v in d.values(): if len(lowerCamelCase_ ) % 2 != 0: middle += 1 else: snake_case : Tuple =0 for i in range(0 , len(lowerCamelCase_ ) ): if v[i] + v[len(lowerCamelCase_ ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
136
0
class UpperCAmelCase : '''simple docstring''' def __init__( self : Tuple ,A : Union[str, Any] ): __A = arr.split("," ) def UpperCamelCase_ ( self : Any ): __A = [int(self.array[0] )] * len(self.array ) __A = [int(self.array[0] )] * len(self.array ) for i in range(1 ,len(self.array ) ): __A = max( int(self.array[i] ) + sum_value[i - 1] ,int(self.array[i] ) ) __A = max(sum_value[i] ,rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": SCREAMING_SNAKE_CASE :Dict = input('please input some numbers:') SCREAMING_SNAKE_CASE :List[Any] = SubArray(whole_array) SCREAMING_SNAKE_CASE :List[Any] = array.solve_sub_array() print(('the results is:', re))
55
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { 'configuration_instructblip': [ 'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InstructBlipConfig', 'InstructBlipQFormerConfig', 'InstructBlipVisionConfig', ], 'processing_instructblip': ['InstructBlipProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'InstructBlipQFormerModel', 'InstructBlipPreTrainedModel', 'InstructBlipForConditionalGeneration', 'InstructBlipVisionModel', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
25
0
from math import pi def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: int ) -> float: '''simple docstring''' return 2 * pi * radius * (angle / 3_6_0) if __name__ == "__main__": print(arc_length(9_0, 1_0))
626
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class a__ ( unittest.TestCase ): """simple docstring""" def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , lowercase=1 / 255 , lowercase=True , ) -> Union[str, Any]: '''simple docstring''' A__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} A__ = parent A__ = batch_size A__ = num_channels A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std A__ = do_rescale A__ = rescale_factor A__ = do_pad def UpperCamelCase ( self ) -> Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase ( self , lowercase , lowercase=False ) -> int: '''simple docstring''' if not batched: A__ = image_inputs[0] if isinstance(lowercase , Image.Image ): A__ , A__ = image.size else: A__ , A__ = image.shape[1], image.shape[2] if w < h: A__ = int(self.size["shortest_edge"] * h / w ) A__ = self.size["shortest_edge"] elif w > h: A__ = self.size["shortest_edge"] A__ = int(self.size["shortest_edge"] * w / h ) else: A__ = self.size["shortest_edge"] A__ = self.size["shortest_edge"] else: A__ = [] for image in image_inputs: A__ , A__ = self.get_expected_values([image] ) 
expected_values.append((expected_height, expected_width) ) A__ = max(lowercase , key=lambda lowercase : item[0] )[0] A__ = max(lowercase , key=lambda lowercase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = DetaImageProcessor if is_vision_available() else None def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = DetaImageProcessingTester(self ) @property def UpperCamelCase ( self ) -> Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase , "image_mean" ) ) self.assertTrue(hasattr(lowercase , "image_std" ) ) self.assertTrue(hasattr(lowercase , "do_normalize" ) ) self.assertTrue(hasattr(lowercase , "do_resize" ) ) self.assertTrue(hasattr(lowercase , "do_rescale" ) ) self.assertTrue(hasattr(lowercase , "do_pad" ) ) self.assertTrue(hasattr(lowercase , "size" ) ) def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} ) self.assertEqual(image_processor.do_pad , lowercase ) def UpperCamelCase ( self ) -> int: '''simple docstring''' pass def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , Image.Image ) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(lowercase ) self.assertEqual( encoded_images.shape , (1, 
self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase ) A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , np.ndarray ) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , torch.Tensor ) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(lowercase ) self.assertEqual( encoded_images.shape , (1, 
self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: A__ = json.loads(f.read() ) A__ = {"image_id": 39769, "annotations": target} # encode them A__ = DetaImageProcessor() A__ = image_processing(images=lowercase , annotations=lowercase , return_tensors="pt" ) # verify pixel values A__ = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , lowercase ) A__ = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) ) # verify area A__ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) ) # verify boxes A__ = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase ) A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) ) # verify image_id A__ = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) ) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) ) # verify class_labels A__ = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) ) # verify 
orig_size A__ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) ) # verify size A__ = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) ) @slow def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: A__ = json.loads(f.read() ) A__ = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} A__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them A__ = DetaImageProcessor(format="coco_panoptic" ) A__ = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors="pt" ) # verify pixel values A__ = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , lowercase ) A__ = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) ) # verify area A__ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) ) # verify boxes A__ = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase ) A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) ) # verify image_id A__ = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) ) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) ) # verify class_labels A__ = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) ) # verify masks A__ = 822873 
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowercase ) # verify orig_size A__ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) ) # verify size A__ = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) )
626
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ :Tuple = { "configuration_bridgetower": [ "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP", "BridgeTowerConfig", "BridgeTowerTextConfig", "BridgeTowerVisionConfig", ], "processing_bridgetower": ["BridgeTowerProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ :List[Any] = ["BridgeTowerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ :int = [ "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST", "BridgeTowerForContrastiveLearning", "BridgeTowerForImageAndTextRetrieval", "BridgeTowerForMaskedLM", "BridgeTowerModel", "BridgeTowerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys a_ :List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
478
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging a_ :Tuple = "\\n\n" a_ :Optional[int] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" a_ :str = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ ( datasets.Metric ): """simple docstring""" def lowercase_ ( self : str ) ->List[Any]: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ), reference_urls=['https://huggingface.co/docs/transformers/perplexity'], ) def lowercase_ ( self : int, _snake_case : Optional[Any], _snake_case : Tuple, _snake_case : int = 1_6, _snake_case : bool = True, _snake_case : Any=None ) ->Any: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": snake_case__ : Any = 'cuda' else: snake_case__ : Any = 'cuda' if torch.cuda.is_available() else 'cpu' snake_case__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ) snake_case__ : List[str] = model.to(_snake_case ) snake_case__ : Dict = AutoTokenizer.from_pretrained(_snake_case ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: snake_case__ : str = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_snake_case ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" snake_case__ : List[Any] = model.config.max_length - 1 else: snake_case__ : str = model.config.max_length snake_case__ : Union[str, Any] = tokenizer( _snake_case, add_special_tokens=_snake_case, padding=_snake_case, truncation=_snake_case, max_length=_snake_case, return_tensors='pt', return_attention_mask=_snake_case, ).to(_snake_case ) snake_case__ : Union[str, Any] = encodings['input_ids'] snake_case__ : str = encodings['attention_mask'] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ), 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ), 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. 
Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." snake_case__ : int = [] snake_case__ : Tuple = CrossEntropyLoss(reduction='none' ) for start_index in logging.tqdm(range(0, len(_snake_case ), _snake_case ) ): snake_case__ : Tuple = min(start_index + batch_size, len(_snake_case ) ) snake_case__ : Tuple = encoded_texts[start_index:end_index] snake_case__ : Dict = attn_masks[start_index:end_index] if add_start_token: snake_case__ : Any = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_snake_case ) snake_case__ : Optional[Any] = torch.cat([bos_tokens_tensor, encoded_batch], dim=1 ) snake_case__ : Optional[Any] = torch.cat( [torch.ones(bos_tokens_tensor.size(), dtype=torch.intaa ).to(_snake_case ), attn_mask], dim=1 ) snake_case__ : Union[str, Any] = encoded_batch with torch.no_grad(): snake_case__ : int = model(_snake_case, attention_mask=_snake_case ).logits snake_case__ : Tuple = out_logits[..., :-1, :].contiguous() snake_case__ : Optional[int] = labels[..., 1:].contiguous() snake_case__ : Dict = attn_mask[..., 1:].contiguous() snake_case__ : Dict = torch.expa( (loss_fct(shift_logits.transpose(1, 2 ), _snake_case ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_snake_case )}
478
1
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    """Return True while `line` still belongs to the body of a definition indented at `indent`.

    A line belongs to the body when it keeps the indent, is (nearly) empty, or is the
    closing parenthesis of a multi-line signature (`)` optionally followed by `-> ...:`).
    """
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` (dotted path inside diffusers).

    Raises ValueError when the module or the object cannot be located.
    """
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines)
            and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        # Each nested name is one indentation level deeper.
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    """Return the indentation string of the first non-empty line of `code` ('' if none)."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Reformat `code` with black (and style its docstrings), preserving its indentation level.

    Indented code is wrapped in a dummy class so black accepts it, then unwrapped.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """Check that every `# Copied from` block in `filename` matches its original.

    Returns a list of [object_name, line_index] diffs; when `overwrite` is True the
    file is rewritten in place with the theoretical (correct) code.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    """Run the copy-consistency check on every Python file of the diffusers source tree."""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
708
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class _lowerCamelCase(DiffusionPipeline):
    """Unconditional latent-diffusion image pipeline.

    A UNet denoises latents under a DDIM-style scheduler and a VQ-VAE decodes
    them to images.
    """

    def __init__(self, vqvae, unet, scheduler):
        """Register the VQ-VAE, UNet and scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Run the denoising loop and return generated images.

        Returns an `ImagePipelineOutput` (or a plain tuple when `return_dict=False`).
        """
        # Sample the initial Gaussian latents in the UNet's latent space.
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
345
0
import argparse
import logging
from collections import namedtuple

import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer  # The authors' implementation

from transformers import BertTokenizer


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

# Hyper-parameter record matching the authors' training configuration.
BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)


def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Convert the authors' BertAbs checkpoint to the transformers implementation.

    Loads the original model, copies its weights into a `BertAbsSummarizer`,
    verifies both stacks produce identical outputs, then saves the state dict.
    """
    # Instantiate the authors' model with the pre-trained weights.
    # NOTE(review): boolean flags reconstructed from the reference conversion
    # script (finetune_bert=False, large=False, share_emb=True, use_bert_emb=False).
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
15
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def lowerCamelCase__ ( _lowerCamelCase : str , _lowerCamelCase : complex , _lowerCamelCase : str = "x" , _lowerCamelCase : float = 10**-10 , _lowerCamelCase : int = 1 , ) -> complex: lowerCamelCase_ = symbols(_lowerCamelCase ) lowerCamelCase_ = lambdify(_lowerCamelCase , _lowerCamelCase ) lowerCamelCase_ = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) ) lowerCamelCase_ = starting_point while True: if diff_function(_lowerCamelCase ) != 0: lowerCamelCase_ = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function( _lowerCamelCase ) else: raise ZeroDivisionError('Could not find root' ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess lowerCamelCase_ = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''') # Find root of polynomial # Find fourth Root of 5 print(F'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''') # Find value of e print( '''The root of log(y) - 1 = 0 is ''', F'''{newton_raphson('log(y) - 1', 2, variable='y')}''', ) # Exponential Roots print( '''The root of exp(x) - 1 = 0 is''', F'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''', ) # Find root of cos(x) print(F'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
549
0
def lowerCAmelCase_ ( a : Tuple ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection a__ = len(_A ) a__ = max(_A ) a__ = min(_A ) # create the counting array a__ = coll_max + 1 - coll_min a__ = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , _A ): a__ = counting_arr[i] + counting_arr[i - 1] # create the output collection a__ = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , _A ) ): a__ = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def lowerCAmelCase_ ( a : List[str] ): return "".join([chr(_A ) for i in counting_sort([ord(_A ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt" __A : int = input('Enter numbers separated by a comma:\n').strip() __A : Optional[int] = [int(item) for item in user_input.split(',')] print(counting_sort(unsorted))
709
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class _UpperCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , _a , _a , _a = None , _a = None ): """simple docstring""" super().__init__() a__ = pad_token_id a__ = max_length a__ = vocab a__ = merges a__ = BytePairTokenizer(_a , _a , sequence_length=_a ) @classmethod def lowercase__ ( cls , _a , *_a , **_a ): """simple docstring""" a__ = [' '.join(_a ) for m in tokenizer.bpe_ranks.keys()] a__ = tokenizer.get_vocab() return cls(_a , _a , *_a , **_a ) @classmethod def lowercase__ ( cls , _a , *_a , **_a ): """simple docstring""" a__ = GPTaTokenizer.from_pretrained(_a , *_a , **_a ) return cls.from_tokenizer(_a , *_a , **_a ) @classmethod def lowercase__ ( cls , _a ): """simple docstring""" return cls(**_a ) def lowercase__ ( self ): """simple docstring""" return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def lowercase__ ( self , _a , _a = None ): """simple docstring""" a__ = self.tf_tokenizer(_a ) a__ = tf.ones_like(_a ) if self.pad_token_id is not None: # pad the tokens up to max length a__ = max_length if max_length is not None else self.max_length if max_length is not None: a__ , a__ = pad_model_inputs( _a , max_seq_length=_a , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
126
0
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 1_0 , __UpperCAmelCase = 2_2 ) -> int: lowercase__: Optional[Any] = range(1 , __UpperCAmelCase ) lowercase__: Any = range(1 , __UpperCAmelCase ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'''{solution(1_0, 2_2) = }''')
586
"""simple docstring""" from __future__ import annotations from typing import Any def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: if not postfix_notation: return 0 lowercase__: int = {'''+''', '''-''', '''*''', '''/'''} lowercase__: list[Any] = [] for token in postfix_notation: if token in operations: lowercase__, lowercase__: Optional[int] = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(__UpperCAmelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
586
1
import argparse

import torch
from torch import nn

from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no equivalent in the HF model (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free nn.Linear sharing its weight with the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M-100 checkpoint and convert it to MaMaaaForConditionalGeneration."""
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    # Older checkpoints store the hyper-parameters under "args", newer under "cfg".
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    # Tie the shared embedding to the decoder's embedding weights.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: the fairseq dict lacks some HF-only buffers (and vice versa).
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    # fixed: was `args.fairseq_pathß` (stray character made this an AttributeError)
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
32
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass A_ : List[str] = (3, 9, -11, 0, 7, 5, 1, -1) A_ : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = 42 class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None: """simple docstring""" a = None for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ): a = Node(__lowerCAmelCase , self.head ) def __iter__( self : Union[str, Any] ) -> Iterator[int]: """simple docstring""" a = self.head while node: yield node.data a = node.next_node def __len__( self : Tuple ) -> int: """simple docstring""" return sum(1 for _ in self ) def __str__( self : Union[str, Any] ) -> str: """simple docstring""" return " -> ".join([str(__lowerCAmelCase ) for node in self] ) def UpperCAmelCase__ ( UpperCAmelCase__ :SortedLinkedList , UpperCAmelCase__ :SortedLinkedList ): '''simple docstring''' return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() A_ : Optional[Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
32
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class UpperCAmelCase_(BaseImageProcessor):
    """Image processor: resize, center-crop, rescale and normalize images.

    Defaults to 224x224 output, 1/255 rescaling and ImageNet mean/std.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Optional[Dict[str, int]] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` ({"height", "width"} or {"shortest_edge"})."""
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to exactly (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` channel-wise with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        do_center_crop: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        do_normalize: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        rescale_factor: Optional[float] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured pipeline to one image or a batch; each argument
        overrides the instance default when not None."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
79
import json
import os
import unittest

from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): mechanical renaming has damaged this test file. The five class
# attributes below all share one name (only the last assignment survives, so the
# original distinct attributes — presumably tokenizer_class, rust_tokenizer_class,
# etc. — are lost); many locals are assigned as `UpperCAmelCase_` but read back
# under their original names (`tokenizer`, `tokens`, `out_s`, ... -> NameError at
# runtime); and one method declares `*lowerCAmelCase, **lowerCAmelCase`, a
# duplicate argument name that is a SyntaxError. The code is left byte-identical
# here; restoring it requires the upstream file. TODO confirm against upstream.
@require_tokenizers
class __UpperCamelCase ( lowercase , unittest.TestCase ):
    SCREAMING_SNAKE_CASE__ = GPTaTokenizer
    SCREAMING_SNAKE_CASE__ = GPTaTokenizerFast
    SCREAMING_SNAKE_CASE__ = True
    SCREAMING_SNAKE_CASE__ = {'add_prefix_space': True}
    SCREAMING_SNAKE_CASE__ = False

    def __A ( self : Dict ):
        '''Write a tiny GPT-2 BPE vocab and merges file into the test tmp dir.'''
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase_ = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        UpperCAmelCase_ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
        UpperCAmelCase_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        UpperCAmelCase_ = {"unk_token": "<unk>"}

        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowerCAmelCase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowerCAmelCase ) )

    def __A ( self : Optional[int] , **lowerCAmelCase : List[str] ):
        '''Build a slow GPT-2 tokenizer from the temp dir (kwargs merged with the special-tokens map).'''
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )

    def __A ( self : Union[str, Any] , **lowerCAmelCase : Tuple ):
        '''Build a fast GPT-2 tokenizer from the temp dir (kwargs merged with the special-tokens map).'''
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase )

    def __A ( self : Optional[Any] , lowerCAmelCase : int ):
        '''Return the (input_text, output_text) pair used by the shared tokenizer tests.'''
        UpperCAmelCase_ = "lower newer"
        UpperCAmelCase_ = "lower newer"
        return input_text, output_text

    def __A ( self : List[Any] ):
        '''Tokenize "lower newer" with the slow tokenizer; check tokens and their ids.'''
        UpperCAmelCase_ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        UpperCAmelCase_ = "lower newer"
        UpperCAmelCase_ = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase , add_prefix_space=lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )

        UpperCAmelCase_ = tokens + [tokenizer.unk_token]
        UpperCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )

    def __A ( self : str ):
        '''Check slow and fast tokenizers agree on tokens, ids, and the unknown token.'''
        if not self.test_rust_tokenizer:
            return

        UpperCAmelCase_ = self.get_tokenizer()
        UpperCAmelCase_ = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase )
        UpperCAmelCase_ = "lower newer"

        # Testing tokenization
        UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase , add_prefix_space=lowerCAmelCase )
        UpperCAmelCase_ = rust_tokenizer.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )

        # Testing conversion to ids without special tokens
        UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
        UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )

        # Testing conversion to ids with special tokens
        UpperCAmelCase_ = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase )
        UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_prefix_space=lowerCAmelCase )
        UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )

        # Testing the unknown token
        UpperCAmelCase_ = tokens + [rust_tokenizer.unk_token]
        UpperCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )

    def __A ( self : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : int ):
        '''No-op override. NOTE(review): duplicate argument name -> SyntaxError as written.'''
        pass

    def __A ( self : str , lowerCAmelCase : List[str]=15 ):
        '''Every encode entry point must raise when padding="max_length" is requested
        (the expected exception type was lost to renaming — TODO confirm upstream).'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )

                # Simple input
                UpperCAmelCase_ = "This is a simple input"
                UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
                UpperCAmelCase_ = ("This is a simple input", "This is a pair")
                UpperCAmelCase_ = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )

                # Simple input
                self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )

                # Simple input
                self.assertRaises(
                    lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" , )

                # Pair input
                self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )

                # Pair input
                self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" )

                # Pair input
                self.assertRaises(
                    lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" , )

    def __A ( self : Optional[int] ):
        '''Padding behaviour of the slow tokenizer when a pad token is set explicitly.'''
        UpperCAmelCase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )

        # Simple input
        UpperCAmelCase_ = "This is a simple input"
        UpperCAmelCase_ = ["This is a simple input looooooooong", "This is a simple input"]
        UpperCAmelCase_ = ("This is a simple input", "This is a pair")
        UpperCAmelCase_ = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        UpperCAmelCase_ = tokenizer.pad_token_id

        UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding="max_length" , max_length=30 , return_tensors="np" )
        UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , truncate=lowerCAmelCase , return_tensors="np" )
        UpperCAmelCase_ = tokenizer(*lowerCAmelCase , padding="max_length" , max_length=60 , return_tensors="np" )
        UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , truncate=lowerCAmelCase , return_tensors="np" )

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["input_ids"] )
        self.assertTrue(0 in out_s["attention_mask"] )

        # s2
        # test automatic padding
        self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
        self.assertFalse(0 in out_sa["attention_mask"][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
        self.assertTrue(0 in out_sa["attention_mask"][1] )

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["input_ids"] )
        self.assertTrue(0 in out_p["attention_mask"] )

        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
        self.assertFalse(0 in out_pa["attention_mask"][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
        self.assertTrue(0 in out_pa["attention_mask"][1] )

    def __A ( self : Dict ):
        '''A custom bos token must be prepended to every encoded sequence and survive decoding.'''
        UpperCAmelCase_ = "$$$"
        UpperCAmelCase_ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase , add_bos_token=lowerCAmelCase )

        UpperCAmelCase_ = "This is a simple input"
        UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]

        UpperCAmelCase_ = tokenizer.bos_token_id

        UpperCAmelCase_ = tokenizer(lowerCAmelCase )
        UpperCAmelCase_ = tokenizer(lowerCAmelCase )

        self.assertEqual(out_s.input_ids[0] , lowerCAmelCase )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )

        UpperCAmelCase_ = tokenizer.decode(out_s.input_ids )
        UpperCAmelCase_ = tokenizer.batch_decode(out_sa.input_ids )

        self.assertEqual(decode_s.split()[0] , lowerCAmelCase )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    def __A ( self : int ):
        '''No-op override.'''
        pass

    def __A ( self : Dict ):
        '''special_tokens_mask from encode_plus must align one-to-one with the produced ids.'''
        UpperCAmelCase_ = [self.get_tokenizer(do_lower_case=lowerCAmelCase , add_bos_token=lowerCAmelCase )]
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase_ = "Encode this."
                UpperCAmelCase_ = "This one too please."
                UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                encoded_sequence += tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                UpperCAmelCase_ = tokenizer.encode_plus(
                    lowerCAmelCase , lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_special_tokens_mask=lowerCAmelCase , )
                UpperCAmelCase_ = encoded_sequence_dict["input_ids"]
                UpperCAmelCase_ = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )

                UpperCAmelCase_ = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCAmelCase )
                ]
                UpperCAmelCase_ = [x for x in filtered_sequence if x is not None]
                self.assertEqual(lowerCAmelCase , lowerCAmelCase )


# OPT reuses the GPT-2 tokenizer; these tests pin exact ids for a fixed prompt.
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
    def __A ( self : int ):
        '''Encoding with the fast OPT tokenizer must survive a save/reload round trip.'''
        UpperCAmelCase_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=lowerCAmelCase )
        UpperCAmelCase_ = "A photo of a cat"

        UpperCAmelCase_ = tokenizer.encode(
            lowerCAmelCase , )

        self.assertEqual(lowerCAmelCase , [2, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained("test_opt" )

        UpperCAmelCase_ = AutoTokenizer.from_pretrained("./test_opt" )
        UpperCAmelCase_ = tokenizer.encode(
            lowerCAmelCase , )
        self.assertEqual(lowerCAmelCase , [2, 250, 1_345, 9, 10, 4_758] )

    def __A ( self : int ):
        '''The slow OPT tokenizer must produce the same ids as the fast one.'''
        UpperCAmelCase_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=lowerCAmelCase )
        UpperCAmelCase_ = "A photo of a cat"

        UpperCAmelCase_ = tokenizer.encode(
            lowerCAmelCase , )
        # Same as above
        self.assertEqual(lowerCAmelCase , [2, 250, 1_345, 9, 10, 4_758] )

    @unittest.skip("This test is failing because of a bug in the fast tokenizer" )
    def __A ( self : List[Any] ):
        '''A remapped bos token must be used when encoding and kept after save/reload.'''
        UpperCAmelCase_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=lowerCAmelCase )
        UpperCAmelCase_ = "bos"
        UpperCAmelCase_ = tokenizer.get_vocab()["bos"]

        UpperCAmelCase_ = "A photo of a cat"
        UpperCAmelCase_ = tokenizer.encode(
            lowerCAmelCase , )
        # We changed the bos token
        self.assertEqual(lowerCAmelCase , [31_957, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained("./tok" )
        UpperCAmelCase_ = AutoTokenizer.from_pretrained("./tok" )
        self.assertTrue(tokenizer.is_fast )
        UpperCAmelCase_ = tokenizer.encode(
            lowerCAmelCase , )
        self.assertEqual(lowerCAmelCase , [31_957, 250, 1_345, 9, 10, 4_758] )
162
0
# Colour codes for the Dutch national flag problem (Dijkstra's 3-way partition).
# The renamed module broke every definition-side name while the code still read
# `red`, `white`, `blue` and `colors`; the working names are restored here.
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def SCREAMING_SNAKE_CASE_(__magic_name__: list) -> list:
    """Sort a list containing only the values in ``colors`` (0, 1, 2).

    Single-pass Dutch-national-flag three-way partition: O(n) time, O(1)
    extra space. Inputs of length >= 2 are sorted in place and the same list
    is returned; an empty input returns ``[]`` and a single-element input
    returns a shallow copy (preserving the original function's behaviour).

    Raises:
        ValueError: if an element other than 0, 1 or 2 is encountered.
    """
    sequence = __magic_name__
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0  # next slot for a red (0)
    high = len(sequence) - 1  # next slot for a blue (2)
    mid = 0  # element currently under inspection
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # Element swapped in from `high` is unexamined, so `mid` stays put.
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"""The elements inside the sequence must contains only {colors} values"""
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{SCREAMING_SNAKE_CASE_(unsorted)}")
590
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): mechanical renaming has damaged this test file: locals are
# assigned as `UpperCamelCase` but read back under original names
# (`tokenizer`, `tokens`, `tokenizer_r`, ... -> NameError at runtime), and the
# four class attributes share one name so only the last assignment survives.
# Code is left byte-identical; restoring it requires the upstream file.
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
    snake_case__ : int = OpenAIGPTTokenizer
    snake_case__ : Tuple = OpenAIGPTTokenizerFast
    snake_case__ : Tuple = True
    snake_case__ : Union[str, Any] = False

    def _A ( self : int ):
        '''Write a tiny word-level BPE vocab and merges file into the test tmp dir.'''
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCamelCase :Optional[int] = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """w</w>""",
            """r</w>""",
            """t</w>""",
            """lo""",
            """low""",
            """er</w>""",
            """low</w>""",
            """lowest</w>""",
            """newer</w>""",
            """wider</w>""",
            """<unk>""",
        ]
        UpperCamelCase :List[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
        UpperCamelCase :Dict = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]

        UpperCamelCase :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        UpperCamelCase :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(__lowerCamelCase ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(__lowerCamelCase ) )

    def _A ( self : List[Any] , __lowerCamelCase : List[str] ):
        '''Return the (input_text, output_text) pair used by the shared tokenizer tests.'''
        return "lower newer", "lower newer"

    def _A ( self : List[str] ):
        '''Tokenize "lower" with the slow tokenizer; check tokens and their ids.'''
        UpperCamelCase :Union[str, Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )

        UpperCamelCase :List[Any] = """lower"""
        UpperCamelCase :Any = ["""low""", """er</w>"""]
        UpperCamelCase :List[str] = tokenizer.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )

        UpperCamelCase :List[Any] = tokens + ["""<unk>"""]
        UpperCamelCase :List[str] = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )

    def _A ( self : Dict , __lowerCamelCase : List[Any]=15 ):
        '''Every encode entry point must raise when padding="max_length" is requested
        (the expected exception type was lost to renaming — TODO confirm upstream).'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCamelCase :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )

                # Simple input
                UpperCamelCase :Optional[int] = """This is a simple input"""
                UpperCamelCase :Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
                UpperCamelCase :Optional[int] = ("""This is a simple input""", """This is a pair""")
                UpperCamelCase :int = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]

                # Simple input tests
                self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" )

                # Simple input
                self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" )

                # Simple input
                self.assertRaises(
                    __lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" , )

                # Pair input
                self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" )

                # Pair input
                self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" )

                # Pair input
                self.assertRaises(
                    __lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="""max_length""" , )

    def _A ( self : Dict ):
        '''No-op override.'''
        pass


# Variant of the suite that additionally requires ftfy and spacy; inherits all
# behaviour from the mixin base unchanged.
@require_ftfy
@require_spacy
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( _a ):
    pass
590
1
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


# NOTE: the previous revision was mechanically renamed and did not even parse
# (five __init__ parameters shared one name — a SyntaxError — and every
# definition-side identifier diverged from its read site: `Split`, `args`,
# `mode`, `logger`, ... were read but never bound). The names below are
# restored from the read sites, which determine them unambiguously.
@dataclass
class GlueDataTrainingArguments:
    """Arguments pertaining to which GLUE task the data is loaded for.

    Fields are read by ``GlueDataset`` below: ``task_name`` selects the
    processor/output mode, ``data_dir`` locates the .tsv files,
    ``max_seq_length`` caps tokenized length, ``overwrite_cache`` forces
    re-featurization even when a cache file exists.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        # GLUE processors are registered under lower-case task names.
        self.task_name = self.task_name.lower()


class Split(Enum):
    # Dataset split selector; also accepted as a plain string by GlueDataset.
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    """Legacy torch ``Dataset`` of ``InputFeatures`` for a GLUE task.

    Features are computed with ``glue_convert_examples_to_features`` and
    cached on disk next to the data; a ``FileLock`` ensures only one process
    builds the cache in distributed training.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        # Label list after the RoBERTa/MNLI index swap applied in __init__.
        return self.label_list
40
def UpperCamelCase(snake_case__: int) -> str:
    """Return the binary representation of an integer as a string.

    Mirrors the built-in ``bin()``: ``0 -> "0b0"``, ``5 -> "0b101"``,
    ``-5 -> "-0b101"``.

    Raises:
        TypeError: if the argument is a ``float`` or ``str`` instead of an int.
    """
    # Reject non-int inputs explicitly. (The previous revision called
    # isinstance(value, value) — the value itself as the type argument —
    # which raised TypeError for every input, and its loop read names that
    # were never assigned.)
    if isinstance(snake_case__, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(snake_case__, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if snake_case__ == 0:
        return "0b0"

    negative = False
    num = snake_case__
    if num < 0:
        negative = True
        num = -num

    # Collect bits least-significant first, then reverse once — avoids the
    # O(n^2) cost of repeatedly inserting at index 0.
    bits: list[str] = []
    while num > 0:
        bits.append(str(num % 2))
        num >>= 1
    bits.reverse()

    prefix = "-0b" if negative else "0b"
    return prefix + "".join(bits)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case__ = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not 
is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
638
from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
638
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


# NOTE(review): mechanical renaming has damaged this module: the four module
# constants below all bind `_lowercase` (only the last survives) while the
# class reads `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and `logger`; every method declares
# several parameters named `a__` (duplicate argument -> SyntaxError); and
# attribute assignments collapsed to `A__` while reads use the original names.
# Code is left byte-identical; restoring it requires the upstream file.
_lowercase = logging.get_logger(__name__)

# vocab file name mapping (read elsewhere as VOCAB_FILES_NAMES)
_lowercase = {"vocab_file": "sentencepiece.model"}

# pretrained vocab file URLs (read elsewhere as PRETRAINED_VOCAB_FILES_MAP)
_lowercase = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

# max model input sizes (read elsewhere as PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES)
_lowercase = {
    "google/rembert": 256,
}


class _UpperCAmelCase ( A__ ):
    # SentencePiece-backed tokenizer (RemBERT-style, judging by the constants above).
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , a__ , a__=False , a__=True , a__=True , a__="[CLS]" , a__="[SEP]" , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , **a__ , ):
        # Positional order of the super() keywords suggests the original
        # signature was (vocab_file, do_lower_case, remove_space, keep_accents,
        # bos_token, eos_token, unk_token, sep_token, pad_token, cls_token,
        # mask_token, **kwargs) — TODO confirm against upstream.
        super().__init__(
            do_lower_case=a__ , remove_space=a__ , keep_accents=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , **a__ , )

        A__ = do_lower_case
        A__ = remove_space
        A__ = keep_accents
        A__ = vocab_file

        A__ = spm.SentencePieceProcessor()
        self.sp_model.Load(a__)

    @property
    def snake_case_ ( self):
        # Vocabulary size as reported by the SentencePiece model.
        return len(self.sp_model)

    def snake_case_ ( self):
        # Full token -> id mapping, including added tokens.
        A__ = {self.convert_ids_to_tokens(a__): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__( self):
        # Drop the unpicklable SentencePiece processor before pickling.
        A__ = self.__dict__.copy()
        A__ = None
        return state

    def __setstate__( self , a__):
        # Rebuild the SentencePiece processor from the stored vocab file.
        A__ = d
        A__ = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def snake_case_ ( self , a__ , a__=False):
        # Tokenize text into SentencePiece pieces.
        A__ = self.sp_model.EncodeAsPieces(a__)
        return pieces

    def snake_case_ ( self , a__):
        # Piece string -> vocabulary id.
        return self.sp_model.PieceToId(a__)

    def snake_case_ ( self , a__):
        # Vocabulary id -> piece string.
        return self.sp_model.IdToPiece(a__)

    def snake_case_ ( self , a__):
        # Join pieces back into a plain string.
        A__ = self.sp_model.decode_pieces(a__)
        return out_string

    def snake_case_ ( self , a__ , a__ = None):
        # Build model inputs with special tokens: [CLS] A [SEP] (+ B [SEP]).
        # NOTE(review): both sequence parameters were collapsed into one read
        # name by the renaming; the pair-sequence branch is ambiguous as written.
        A__ = [self.sep_token_id]
        A__ = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def snake_case_ ( self , a__ , a__ = None , a__ = False):
        # Special-tokens mask: 1 for [CLS]/[SEP] positions, 0 for sequence tokens.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]

        if token_ids_a is not None:
            return [1] + ([0] * len(a__)) + [1] + ([0] * len(a__)) + [1]
        return [1] + ([0] * len(a__)) + [1]

    def snake_case_ ( self , a__ , a__ = None):
        # Token-type ids: 0 for the first segment (incl. specials), 1 for the second.
        A__ = [self.sep_token_id]
        A__ = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def snake_case_ ( self , a__ , a__ = None):
        # Copy the SentencePiece model file into the given save directory.
        if not os.path.isdir(a__):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(a__))
            return
        A__ = os.path.join(
            a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])

        if os.path.abspath(self.vocab_file) != os.path.abspath(a__):
            copyfile(self.vocab_file , a__)

        return (out_vocab_file,)
632
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def lowerCAmelCase__ ( UpperCamelCase_ : List[Any] )-> Dict: if isinstance(UpperCamelCase_ , collections.abc.Iterable ): return x return (x, x) @require_tf class _UpperCAmelCase : def snake_case_ ( self , a__ , a__): pass def snake_case_ ( self): pass def snake_case_ ( self): pass def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__): A__ = VisionTextDualEncoderConfig.from_vision_text_configs(a__ , a__) A__ = TFVisionTextDualEncoderModel(a__) A__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim)) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim)) def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__): A__ , A__ = self.get_vision_text_model(a__ , a__) A__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__) A__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], 
model.config.projection_dim)) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim)) def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__): A__ , A__ = self.get_vision_text_model(a__ , a__) A__ = {'''vision_model''': vision_model, '''text_model''': text_model} A__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a__) A__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim)) def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__): A__ , A__ = self.get_vision_text_model(a__ , a__) A__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__) A__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__) A__ = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(a__) A__ = TFVisionTextDualEncoderModel.from_pretrained(a__) A__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__) A__ = after_output[0].numpy() A__ = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(a__ , 1e-5) def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__): A__ , A__ = self.get_vision_text_model(a__ , a__) A__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__) A__ = model( input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__) A__ = output.vision_model_output.attentions self.assertEqual(len(a__) , vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) A__ = to_atuple(vision_model.config.image_size) A__ = to_atuple(vision_model.config.patch_size) A__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) A__ = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, 
seq_len, seq_len)) A__ = output.text_model_output.attentions self.assertEqual(len(a__) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def snake_case_ ( self , a__ , a__ , a__): A__ = np.abs((a - b)).max() self.assertLessEqual(a__ , a__ , F"Difference between torch and flax is {diff} (>= {tol}).") def snake_case_ ( self): A__ = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**a__) def snake_case_ ( self): A__ = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**a__) def snake_case_ ( self): A__ = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**a__) def snake_case_ ( self): A__ = self.prepare_config_and_inputs() self.check_save_load(**a__) def snake_case_ ( self): A__ = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**a__) @slow def snake_case_ ( self): A__ , A__ = self.get_pretrained_model_and_inputs() A__ = model_a(**a__) A__ = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(a__) A__ = TFVisionTextDualEncoderModel.from_pretrained(a__) A__ = model_a(**a__) A__ = after_outputs[0].numpy() A__ = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(a__ , 1e-5) @require_tf class _UpperCAmelCase ( A__ , unittest.TestCase ): def snake_case_ ( self): A__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''') A__ = 1_3 A__ = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) A__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) A__ = random_attention_mask([batch_size, 4]) A__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs 
def snake_case_ ( self , a__ , a__): A__ = TFViTModel(a__ , name='''vision_model''') A__ = TFBertModel(a__ , name='''text_model''') return vision_model, text_model def snake_case_ ( self): A__ = TFViTModelTester(self) A__ = TFBertModelTester(self) A__ = vit_model_tester.prepare_config_and_inputs() A__ = bert_model_tester.prepare_config_and_inputs() A__ , A__ , A__ = vision_config_and_inputs ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class _UpperCAmelCase ( A__ , unittest.TestCase ): def snake_case_ ( self): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
A__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''') A__ = 1_3 A__ = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) A__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) A__ = random_attention_mask([batch_size, 4]) A__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__): A__ , A__ = self.get_vision_text_model(a__ , a__) A__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__) A__ = model( input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__) A__ = output.vision_model_output.attentions self.assertEqual(len(a__) , vision_config.num_hidden_layers) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) A__ = to_atuple(vision_model.config.image_size) A__ = to_atuple(vision_model.config.patch_size) A__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) A__ = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len)) A__ = output.text_model_output.attentions self.assertEqual(len(a__) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def snake_case_ ( self , a__ , a__): A__ = TFDeiTModel(a__ , name='''vision_model''') A__ = TFRobertaModel(a__ , name='''text_model''') return vision_model, text_model def snake_case_ ( self): A__ = TFDeiTModelTester(self) A__ = TFRobertaModelTester(self) A__ = vit_model_tester.prepare_config_and_inputs() A__ = bert_model_tester.prepare_config_and_inputs() A__ , A__ , A__ = vision_config_and_inputs ( ( A__ ) , ( A__ 
) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class _UpperCAmelCase ( A__ , unittest.TestCase ): def snake_case_ ( self): A__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''') A__ = 1_3 A__ = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) A__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) A__ = random_attention_mask([batch_size, 4]) A__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case_ ( self , a__ , a__): A__ = TFCLIPVisionModel(a__ , name='''vision_model''') A__ = TFBertModel(a__ , name='''text_model''') return vision_model, text_model def snake_case_ ( self): A__ = TFCLIPVisionModelTester(self) A__ = TFBertModelTester(self) A__ = clip_model_tester.prepare_config_and_inputs() A__ = bert_model_tester.prepare_config_and_inputs() A__ , A__ = vision_config_and_inputs ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class _UpperCAmelCase ( unittest.TestCase ): @slow def snake_case_ ( self): A__ = TFVisionTextDualEncoderModel.from_pretrained( 
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=a__) A__ = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''') A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') A__ = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=a__ , padding=a__ , return_tensors='''np''') A__ = model(**a__) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) A__ = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]]) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a__ , atol=1e-3))
632
1
"""Parse and write the YAML metadata block at the top of a dataset README.md."""

from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """A SafeLoader that rejects YAML mappings containing duplicate keys.

    Plain ``yaml.safe_load`` silently keeps the last value for a duplicated
    key; for metadata validation we want that to be a hard error instead.
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        """Raise ``TypeError`` if the mapping ``node`` repeats any key."""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable, so turn them into tuples before counting.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        # Build the mapping normally, then validate it for duplicates.
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its leading ``---``-delimited YAML block and the rest.

    Returns ``(yaml_block, remaining_content)``; ``yaml_block`` is ``None``
    when the README has no front-matter block.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        # Index of the closing "---" line (offset by 1 for the slice start).
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    """Dict of dataset metadata, round-trippable to/from a README YAML block."""

    # Fields whose YAML spelling uses dashes instead of underscores.
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from the YAML front matter of the README at ``path``."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            # No YAML block present: empty metadata.
            return cls()

    def to_readme(self, path: Path):
        """Write this metadata into the README at ``path``, preserving its body."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        """Return README text with this metadata as the YAML front matter."""
        if readme_content is not None:
            # Replace any existing front matter, keep the body untouched.
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Build metadata from a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, restoring dashed spelling for special fields."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# Known task-id categories for validation; values are lists of sub-task ids.
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
135
"""Unit tests for the 0/1 knapsack solver in ``knapsack.knapsack``."""

import unittest

from knapsack import knapsack as k


class _A(unittest.TestCase):
    """Exercise ``k.knapsack(capacity, weights, values, count)``.

    Methods are named ``test_*`` so unittest discovery actually runs them,
    and the four arguments use distinct local names so the call passes
    (capacity, weights, values, count) in the correct order.
    """

    def test_base_case(self):
        """Zero capacity must yield zero profit, whatever the items are."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        # A valuable item still cannot be taken with zero capacity.
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Small instance: best subset of weights [3, 2, 1] within capacity 3."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic textbook instance: optimum is 100 + 120 = 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
135
1
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES __UpperCamelCase : List[str] = logging.get_logger(__name__) __UpperCamelCase : List[str] = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) __UpperCamelCase : Tuple = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", 
"""FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) __UpperCamelCase : Optional[Any] = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) __UpperCamelCase : Optional[Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) __UpperCamelCase : int = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", 
"""FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) __UpperCamelCase : List[Any] = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) __UpperCamelCase : List[Any] = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) __UpperCamelCase : List[str] = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) __UpperCamelCase : List[str] = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", 
"""FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) __UpperCamelCase : int = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) __UpperCamelCase : Dict = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) __UpperCamelCase : str = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) __UpperCamelCase : Optional[int] = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) 
__UpperCamelCase : Dict = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) __UpperCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) __UpperCamelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) __UpperCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) __UpperCamelCase : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) __UpperCamelCase : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) __UpperCamelCase : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) __UpperCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) __UpperCamelCase : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) __UpperCamelCase : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) __UpperCamelCase : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) __UpperCamelCase : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) __UpperCamelCase : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) __UpperCamelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) __UpperCamelCase : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :Tuple = FLAX_MODEL_MAPPING __UpperCamelCase : Tuple = auto_class_update(FlaxAutoModel) class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :Union[str, Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING 
__UpperCamelCase : List[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :Dict = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING __UpperCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :List[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING __UpperCamelCase : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :Optional[Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __UpperCamelCase : Optional[Any] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :Optional[Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __UpperCamelCase : Optional[int] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :Optional[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING __UpperCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :List[Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __UpperCamelCase : Optional[int] = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :Optional[int] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING __UpperCamelCase : int = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :str = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING __UpperCamelCase : int = auto_class_update( 
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING __UpperCamelCase : Optional[Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING __UpperCamelCase : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class __UpperCamelCase ( _BaseAutoModelClass ): __snake_case :Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING __UpperCamelCase : str = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
80
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class _UpperCamelCase ( lowerCamelCase__ ): '''simple docstring''' _A = 42 class _UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' @register_to_config def __init__( self : int , SCREAMING_SNAKE_CASE_ : int = 6_5_5_3_6 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : str = "fourier" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE_ : Tuple[str] = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : Tuple[int] = (3_2, 3_2, 6_4) , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = False , ): super().__init__() _a = sample_size # time if time_embedding_type == "fourier": _a = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE_ , log=SCREAMING_SNAKE_CASE_ , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ ) _a = 2 * block_out_channels[0] elif time_embedding_type == "positional": _a = Timesteps( block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ , downscale_freq_shift=SCREAMING_SNAKE_CASE_ ) _a = block_out_channels[0] if use_timestep_embedding: _a = block_out_channels[0] * 4 _a = 
TimestepEmbedding( in_channels=SCREAMING_SNAKE_CASE_ , time_embed_dim=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , out_dim=block_out_channels[0] , ) _a = nn.ModuleList([] ) _a = None _a = nn.ModuleList([] ) _a = None # down _a = in_channels for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): _a = output_channel _a = block_out_channels[i] if i == 0: input_channel += extra_in_channels _a = i == len(SCREAMING_SNAKE_CASE_ ) - 1 _a = get_down_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(SCREAMING_SNAKE_CASE_ ) # mid _a = get_mid_block( SCREAMING_SNAKE_CASE_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE_ , add_downsample=SCREAMING_SNAKE_CASE_ , ) # up _a = list(reversed(SCREAMING_SNAKE_CASE_ ) ) _a = reversed_block_out_channels[0] if out_block_type is None: _a = out_channels else: _a = block_out_channels[0] for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): _a = output_channel _a = ( reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE_ ) - 1 else final_upsample_channels ) _a = i == len(SCREAMING_SNAKE_CASE_ ) - 1 _a = get_up_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(SCREAMING_SNAKE_CASE_ ) _a = output_channel # out _a = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 ) _a = get_out_block( out_block_type=SCREAMING_SNAKE_CASE_ , num_groups_out=SCREAMING_SNAKE_CASE_ , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE_ , 
act_fn=SCREAMING_SNAKE_CASE_ , fc_dim=block_out_channels[-1] // 4 , ) def _UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE_ : bool = True , ): _a = timestep if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ): _a = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0: _a = timesteps[None].to(sample.device ) _a = self.time_proj(SCREAMING_SNAKE_CASE_ ) if self.config.use_timestep_embedding: _a = self.time_mlp(SCREAMING_SNAKE_CASE_ ) else: _a = timestep_embed[..., None] _a = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) _a = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down _a = () for downsample_block in self.down_blocks: _a , _a = downsample_block(hidden_states=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: _a = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): _a = down_block_res_samples[-1:] _a = down_block_res_samples[:-1] _a = upsample_block(SCREAMING_SNAKE_CASE_ , res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) # 5. post-process if self.out_block: _a = self.out_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not return_dict: return (sample,) return UNetaDOutput(sample=SCREAMING_SNAKE_CASE_ )
562
0
from __future__ import annotations from collections.abc import Callable UpperCamelCase = list[list[float | int]] def _a ( lowerCamelCase__ , lowerCamelCase__ ) -> Matrix: lowerCamelCase_ : int = len(lowerCamelCase__ ) lowerCamelCase_ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowerCamelCase__ )] lowerCamelCase_ : int lowerCamelCase_ : int lowerCamelCase_ : int lowerCamelCase_ : int lowerCamelCase_ : int lowerCamelCase_ : float for row in range(lowerCamelCase__ ): for col in range(lowerCamelCase__ ): lowerCamelCase_ : Dict = matrix[row][col] lowerCamelCase_ : List[str] = vector[row][0] lowerCamelCase_ : Dict = 0 lowerCamelCase_ : Optional[int] = 0 while row < size and col < size: # pivoting lowerCamelCase_ : Any = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowerCamelCase__ , lowerCamelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: lowerCamelCase_ , lowerCamelCase_ : Any = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , lowerCamelCase__ ): lowerCamelCase_ : Any = augmented[rowa][col] / augmented[row][col] lowerCamelCase_ : List[str] = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , lowerCamelCase__ ): for row in range(lowerCamelCase__ ): lowerCamelCase_ : Any = augmented[row][col] / augmented[col][col] for cola in range(lowerCamelCase__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowerCamelCase__ ) ] def _a ( lowerCamelCase__ ) -> Callable[[int], int]: lowerCamelCase_ : int = len(lowerCamelCase__ ) lowerCamelCase_ : Matrix = [[0 for _ in range(lowerCamelCase__ )] for _ in range(lowerCamelCase__ )] lowerCamelCase_ : Matrix = [[0] for _ in range(lowerCamelCase__ )] lowerCamelCase_ : Matrix lowerCamelCase_ : int lowerCamelCase_ : int 
lowerCamelCase_ : int for x_val, y_val in enumerate(lowerCamelCase__ ): for col in range(lowerCamelCase__ ): lowerCamelCase_ : Dict = (x_val + 1) ** (size - col - 1) lowerCamelCase_ : List[str] = y_val lowerCamelCase_ : Optional[int] = solve(lowerCamelCase__ , lowerCamelCase__ ) def interpolated_func(lowerCamelCase__ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(lowerCamelCase__ ) ) return interpolated_func def _a ( lowerCamelCase__ ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def _a ( lowerCamelCase__ = question_function , lowerCamelCase__ = 10 ) -> int: lowerCamelCase_ : list[int] = [func(lowerCamelCase__ ) for x_val in range(1 , order + 1 )] lowerCamelCase_ : list[Callable[[int], int]] = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] lowerCamelCase_ : int = 0 lowerCamelCase_ : Callable[[int], int] lowerCamelCase_ : int for poly in polynomials: lowerCamelCase_ : Optional[Any] = 1 while func(lowerCamelCase__ ) == poly(lowerCamelCase__ ): x_val += 1 ret += poly(lowerCamelCase__ ) return ret if __name__ == "__main__": print(f'''{solution() = }''')
144
# Backward-compatibility shim: this script only re-exports the pipeline from
# `diffusers` and warns callers to import it directly.
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401

warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
144
1
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BioGPT's BPE tokenizer."""

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny vocab/merges pair to the temp dir for the tests below."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        """Identity pair: BioGPT tokenization round-trips this text unchanged."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE-tokenize a word and map tokens (plus <unk>) to their vocab ids."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Special-token layout: </s> (id 2) prefixes each sequence."""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
53
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    """Configuration class for PEGASUS encoder-decoder models.

    Stores the hyper-parameters used to instantiate a PEGASUS model
    (vocabulary size, layer counts, hidden sizes, dropout rates, special
    token ids). Defaults match google/pegasus-large.
    """

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic config attribute names onto PEGASUS-specific ones.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        """Alias for ``encoder_attention_heads`` (generic config API)."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias for ``d_model`` (generic config API)."""
        return self.d_model
514
0
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_sas_model,
    qa_sas_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


# Which generator to load ("bart" or a t5 fallback) and whether to build the
# GPU dense retrieval indexes (needs the memmapped passage reps on disk).
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    """Load the question-embedding retriever and the seq2seq answer generator."""
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load wiki40b snippets, their dense FAISS index (on GPU), and an ES client."""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a dense index over its question reps."""
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    """Return the ELI5 training examples whose questions are closest to `question`."""
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _, hit_ids = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in hit_ids[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve supporting passages for `question` and build the generator input.

    Returns (question_doc, support_list) where question_doc is the
    "question: ... context: ..." string fed to the seq2seq model and
    support_list holds (article_title, section_title, score, passage_text).
    """
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"])
        for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate one answer for `question_doc` with the seq2seq model."""
    with torch.no_grad():
        answer = qa_sas_generate(
            sas_tokenizer,
            sas_model,
            question_doc,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # NOTE(review): support_list is read from module globals (set in the UI
    # script before this is called) — confirm, this mirrors the original app.
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave and de-duplicate dense and sparse hits, keep top 10.
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>",
                    unsafe_allow_html=True,
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = """
---

**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
657
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Project Euler problem 173: count hollow square laminae that can be
    formed using up to ``limit`` square tiles.

    A lamina with outer side ``outer_width`` and square hole of side ``h``
    (``h >= 1``, same parity as the outer side, ``outer_width - h >= 2``)
    uses ``outer_width**2 - h**2`` tiles, so for each outer size we count the
    admissible hole sizes in closed form instead of enumerating them.
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            # Smallest hole side that keeps the tile count within the limit.
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole must have the same parity as the outer square.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        # Hole sides run from the lower bound up to outer_width - 2 in steps of 2.
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
657
1
import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Convert a fairseq X-MOD checkpoint to a Hugging Face Xmod model.

    Loads the fairseq model, builds a matching XmodConfig, copies every weight
    tensor across, verifies both models produce (nearly) identical outputs on a
    sample sentence, and saves the converted model.

    Args:
        xmod_checkpoint_path: path to the official fairseq checkpoint file.
        pytorch_dump_folder_path: output directory for the converted model.
        classification_head: convert the "mnli" classification head instead of
            the masked-LM head.
    """
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,  # PyTorch default used in fairseq
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate (fc1)
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output (fc2)
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            # NOTE(review): HF XmodAdapter exposes dense1/dense2 for fairseq's
            # fc1/fc2 — confirm attribute names against modeling_xmod.
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
248
import os import numpy import onnx def lowerCAmelCase_ ( _snake_case : Tuple , _snake_case : int ) -> List[str]: '''simple docstring''' __magic_name__ : Dict = a.name __magic_name__ : Optional[Any] = b.name __magic_name__ : Optional[int] = "" __magic_name__ : int = "" __magic_name__ : Any = a == b __magic_name__ : int = name_a __magic_name__ : List[str] = name_b return res def lowerCAmelCase_ ( _snake_case : Any , _snake_case : Dict , _snake_case : str ) -> str: '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(_snake_case , _snake_case ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case ) _graph_replace_input_with(node_proto.attribute[1].g , _snake_case , _snake_case ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case ) def lowerCAmelCase_ ( _snake_case : Any , _snake_case : Optional[Any] , _snake_case : str ) -> Any: '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(_snake_case , _snake_case , _snake_case ) def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : List[str] , _snake_case : Union[str, Any] ) -> List[Any]: '''simple docstring''' __magic_name__ : Tuple = list(model.graph.initializer ) __magic_name__ : Any = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __magic_name__ : Dict = inits[i].name __magic_name__ : List[Any] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , _snake_case , _snake_case ) def lowerCAmelCase_ ( _snake_case : str ) -> List[str]: '''simple docstring''' __magic_name__ : Union[str, Any] = os.path.dirname(_snake_case ) __magic_name__ : 
List[str] = os.path.basename(_snake_case ) __magic_name__ : Tuple = onnx.load(os.path.join(_snake_case , _snake_case ) ) __magic_name__ : Dict = list(model.graph.initializer ) __magic_name__ : Dict = set() __magic_name__ : Any = {} __magic_name__ : Tuple = [] __magic_name__ : Optional[int] = 0 for i in range(len(_snake_case ) ): if i in dup_set: continue for j in range(i + 1 , len(_snake_case ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(_snake_case ) dup_set.add(_snake_case ) __magic_name__ : Optional[int] = inits[j].data_type __magic_name__ : Any = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("unexpected data type: " , _snake_case ) total_reduced_size += mem_size __magic_name__ : Optional[int] = inits[i].name __magic_name__ : Optional[Any] = inits[j].name if name_i in dup_map: dup_map[name_i].append(_snake_case ) else: __magic_name__ : Union[str, Any] = [name_j] ind_to_replace.append((j, i) ) print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" ) __magic_name__ : List[Any] = sorted(_snake_case ) _remove_dup_initializers_from_model(_snake_case , _snake_case , _snake_case ) __magic_name__ : List[str] = "optimized_" + model_file_name __magic_name__ : Tuple = os.path.join(_snake_case , _snake_case ) onnx.save(_snake_case , _snake_case ) return new_model
124
0
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase__ ( snake_case_, unittest.TestCase ): '''simple docstring''' _snake_case = TextToVideoSDPipeline _snake_case = TEXT_TO_IMAGE_PARAMS _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. _snake_case = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=3_2 , attention_head_dim=4 , ) UpperCamelCase = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , ) torch.manual_seed(0 ) UpperCamelCase = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( 
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) UpperCamelCase = CLIPTextModel(lowerCamelCase__ ) UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCamelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__=0 ): '''simple docstring''' if str(lowerCamelCase__ ).startswith('''mps''' ): UpperCamelCase = torch.manual_seed(lowerCamelCase__ ) else: UpperCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) UpperCamelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCamelCase = self.get_dummy_components() UpperCamelCase = TextToVideoSDPipeline(**lowerCamelCase__ ) UpperCamelCase = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) UpperCamelCase = self.get_dummy_inputs(lowerCamelCase__ ) UpperCamelCase = '''np''' UpperCamelCase = sd_pipe(**lowerCamelCase__ ).frames UpperCamelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) UpperCamelCase = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not 
is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ , expected_max_diff=1e-2 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def UpperCAmelCase ( self ): '''simple docstring''' pass def UpperCAmelCase ( self ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class lowercase__ ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' ) UpperCamelCase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCamelCase = pipe.to('''cuda''' ) UpperCamelCase = '''Spiderman is surfing''' UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) UpperCamelCase = pipe(lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=2_5 , output_type='''pt''' ).frames UpperCamelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' ) UpperCamelCase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) UpperCamelCase = pipe.to('''cuda''' 
) UpperCamelCase = '''Spiderman is surfing''' UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) UpperCamelCase = pipe(lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='''pt''' ).frames UpperCamelCase = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
350
'''simple docstring''' import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class lowercase__ ( snake_case_ ): '''simple docstring''' _snake_case = ['''image_processor''', '''tokenizer'''] _snake_case = '''BlipImageProcessor''' _snake_case = '''AutoTokenizer''' def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): '''simple docstring''' super().__init__(lowerCamelCase__ , lowerCamelCase__ ) # add QFormer tokenizer UpperCamelCase = qformer_tokenizer def __call__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = True , lowerCamelCase__ = None , **lowerCamelCase__ , ): '''simple docstring''' if images is None and text is None: raise ValueError('''You have to specify at least images or text.''' ) UpperCamelCase = BatchFeature() if text is not None: UpperCamelCase = self.tokenizer( text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , 
**lowerCamelCase__ , ) encoding.update(lowerCamelCase__ ) UpperCamelCase = self.qformer_tokenizer( text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , ) UpperCamelCase = qformer_text_encoding.pop('''input_ids''' ) UpperCamelCase = qformer_text_encoding.pop('''attention_mask''' ) if images is not None: UpperCamelCase = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ ) encoding.update(lowerCamelCase__ ) return encoding def UpperCAmelCase ( self , *lowerCamelCase__ , **lowerCamelCase__ ): '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ ) def UpperCAmelCase ( self , *lowerCamelCase__ , **lowerCamelCase__ ): '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = self.tokenizer.model_input_names UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def UpperCAmelCase ( self , lowerCamelCase__ , **lowerCamelCase__ ): '''simple docstring''' if os.path.isfile(lowerCamelCase__ ): raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) UpperCamelCase = os.path.join(lowerCamelCase__ , '''qformer_tokenizer''' ) 
self.qformer_tokenizer.save_pretrained(lowerCamelCase__ ) return super().save_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) @classmethod def UpperCAmelCase ( cls , lowerCamelCase__ , **lowerCamelCase__ ): '''simple docstring''' UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ , subfolder='''qformer_tokenizer''' ) UpperCamelCase = cls._get_arguments_from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) args.append(lowerCamelCase__ ) return cls(*lowerCamelCase__ )
350
1
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE =get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") class __magic_name__ ( __lowerCamelCase , unittest.TestCase): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = BartphoTokenizer SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Any = True def _A ( self: List[Any] ): super().setUp() SCREAMING_SNAKE_CASE_ = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] SCREAMING_SNAKE_CASE_ = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''} SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(f"{token} {vocab_tokens[token]}\n" ) SCREAMING_SNAKE_CASE_ = BartphoTokenizer(_lowerCamelCase , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self: Dict , **_lowerCamelCase: int ): kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _A ( self: Optional[Any] , _lowerCamelCase: str ): SCREAMING_SNAKE_CASE_ = '''This is a là test''' SCREAMING_SNAKE_CASE_ = '''This is a<unk><unk> test''' return input_text, output_text def _A ( self: Dict ): SCREAMING_SNAKE_CASE_ = BartphoTokenizer(_lowerCamelCase , self.monolingual_vocab_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE_ = '''This is a là test''' SCREAMING_SNAKE_CASE_ = '''▁This ▁is ▁a ▁l à ▁t est'''.split() SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token] 
SCREAMING_SNAKE_CASE_ = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
234
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def a ( a , a ) ->Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = old_name if "patch_embed" in old_name: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = old_name.split('''.''' ) if layer == "0": SCREAMING_SNAKE_CASE = old_name.replace('''0''' , '''convolution1''' ) elif layer == "1": SCREAMING_SNAKE_CASE = old_name.replace('''1''' , '''batchnorm_before''' ) elif layer == "3": SCREAMING_SNAKE_CASE = old_name.replace('''3''' , '''convolution2''' ) else: SCREAMING_SNAKE_CASE = old_name.replace('''4''' , '''batchnorm_after''' ) if "network" in old_name and re.search(r'''\d\.\d''' , a ): SCREAMING_SNAKE_CASE = r'''\b\d{2}\b''' if bool(re.search(a , a ) ): SCREAMING_SNAKE_CASE = re.search(r'''\d\.\d\d.''' , a ).group() else: SCREAMING_SNAKE_CASE = re.search(r'''\d\.\d.''' , a ).group() if int(match[0] ) < 6: SCREAMING_SNAKE_CASE = old_name.replace(a , '''''' ) SCREAMING_SNAKE_CASE = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] ) SCREAMING_SNAKE_CASE = '''intermediate_stages.''' + trimmed_name else: SCREAMING_SNAKE_CASE = old_name.replace(a , '''''' ) if int(match[2] ) < num_meta4D_last_stage: SCREAMING_SNAKE_CASE = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] ) else: SCREAMING_SNAKE_CASE = str(int(match[2] ) - num_meta4D_last_stage ) SCREAMING_SNAKE_CASE = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index ) if "norm1" in old_name: SCREAMING_SNAKE_CASE = trimmed_name.replace('''norm1''' , '''layernorm1''' ) elif "norm2" in 
old_name: SCREAMING_SNAKE_CASE = trimmed_name.replace('''norm2''' , '''layernorm2''' ) elif "fc1" in old_name: SCREAMING_SNAKE_CASE = trimmed_name.replace('''fc1''' , '''linear_in''' ) elif "fc2" in old_name: SCREAMING_SNAKE_CASE = trimmed_name.replace('''fc2''' , '''linear_out''' ) SCREAMING_SNAKE_CASE = '''last_stage.''' + trimmed_name elif "network" in old_name and re.search(r'''.\d.''' , a ): SCREAMING_SNAKE_CASE = old_name.replace('''network''' , '''intermediate_stages''' ) if "fc" in new_name: SCREAMING_SNAKE_CASE = new_name.replace('''fc''' , '''convolution''' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): SCREAMING_SNAKE_CASE = new_name.replace('''norm1''' , '''batchnorm_before''' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): SCREAMING_SNAKE_CASE = new_name.replace('''norm2''' , '''batchnorm_after''' ) if "proj" in new_name: SCREAMING_SNAKE_CASE = new_name.replace('''proj''' , '''projection''' ) if "dist_head" in new_name: SCREAMING_SNAKE_CASE = new_name.replace('''dist_head''' , '''distillation_classifier''' ) elif "head" in new_name: SCREAMING_SNAKE_CASE = new_name.replace('''head''' , '''classifier''' ) elif "patch_embed" in new_name: SCREAMING_SNAKE_CASE = '''efficientformer.''' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": SCREAMING_SNAKE_CASE = new_name.replace('''norm''' , '''layernorm''' ) SCREAMING_SNAKE_CASE = '''efficientformer.''' + new_name else: SCREAMING_SNAKE_CASE = '''efficientformer.encoder.''' + new_name return new_name def a ( a , a ) ->Any: '''simple docstring''' for key in checkpoint.copy().keys(): SCREAMING_SNAKE_CASE = checkpoint.pop(a ) SCREAMING_SNAKE_CASE = val return checkpoint def a ( ) ->List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = '''http://images.cocodataset.org/val2017/000000039769.jpg''' SCREAMING_SNAKE_CASE = Image.open(requests.get(a , stream=a ).raw ) return image def a ( a , a , a , a ) ->Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = 
torch.load(a , map_location='''cpu''' )['''model'''] SCREAMING_SNAKE_CASE = EfficientFormerConfig.from_json_file(a ) SCREAMING_SNAKE_CASE = EfficientFormerForImageClassificationWithTeacher(a ) SCREAMING_SNAKE_CASE = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] ) SCREAMING_SNAKE_CASE = config.depths[-1] - config.num_metaad_blocks + 1 SCREAMING_SNAKE_CASE = convert_torch_checkpoint(a , a ) model.load_state_dict(a ) model.eval() SCREAMING_SNAKE_CASE = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } # prepare image SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = 256 SCREAMING_SNAKE_CASE = 224 SCREAMING_SNAKE_CASE = EfficientFormerImageProcessor( size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , ) SCREAMING_SNAKE_CASE = processor(images=a , return_tensors='''pt''' ).pixel_values # original processing pipeline SCREAMING_SNAKE_CASE = Compose( [ Resize(a , interpolation=pillow_resamplings['''bicubic'''] ), CenterCrop(a ), ToTensor(), Normalize(a , a ), ] ) SCREAMING_SNAKE_CASE = image_transforms(a ).unsqueeze(0 ) assert torch.allclose(a , a ) SCREAMING_SNAKE_CASE = model(a ) SCREAMING_SNAKE_CASE = outputs.logits SCREAMING_SNAKE_CASE = (1, 1000) if "l1" in model_name: SCREAMING_SNAKE_CASE = torch.Tensor( [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] ) assert torch.allclose(logits[0, :10] , a , atol=1E-3 ) assert logits.shape == expected_shape elif "l3" in model_name: SCREAMING_SNAKE_CASE = torch.Tensor( [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] ) assert torch.allclose(logits[0, :10] , a , atol=1E-3 ) assert logits.shape == expected_shape elif "l7" in model_name: SCREAMING_SNAKE_CASE = torch.Tensor( [-1.02_83, -1.41_31, -0.56_44, 
-1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] ) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" ) # Save Checkpoints Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(a ) print(F"""Processor successfuly saved at {pytorch_dump_path}""" ) if push_to_hub: print('''Pushing model to the hub...''' ) model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add model''' , use_temp_dir=a , ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add image processor''' , use_temp_dir=a , ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to EfficientFormer pytorch checkpoint.', ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for EfficientFormer model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) parser.set_defaults(push_to_hub=True) __lowerCAmelCase = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
201
0
import datasets snake_case = """\ @InProceedings{conneau2018xnli, author = \"Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin\", title = \"XNLI: Evaluating Cross-lingual Sentence Representations\", booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\", year = \"2018\", publisher = \"Association for Computational Linguistics\", location = \"Brussels, Belgium\", } """ snake_case = """\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). """ snake_case = """ Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: 'accuracy': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric(\"xnli\") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} """ def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple , snake_case__ :int ) -> Dict: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ), } ) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' ,) def __UpperCAmelCase ( self : Any ,__A : int ,__A : 
Optional[Any] ) -> List[Any]: return {"accuracy": simple_accuracy(__A ,__A )}
714
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class A_ : """simple docstring""" def __init__( self : Union[str, Any] ,__A : Tuple ,__A : Dict=13 ,__A : Union[str, Any]=7 ,__A : Optional[Any]=True ,__A : Dict=True ,__A : List[str]=True ,__A : int=True ,__A : str=99 ,__A : Tuple=32 ,__A : List[Any]=2 ,__A : Dict=4 ,__A : List[str]=37 ,__A : Dict="gelu" ,__A : Optional[Any]=0.1 ,__A : Dict=0.1 ,__A : Dict=512 ,__A : Any=16 ,__A : Tuple=2 ,__A : List[Any]=0.02 ,__A : Any=3 ,__A : Tuple=4 ,__A : Dict=None ,__A : List[Any]=0 ,) -> Optional[Any]: _lowercase = parent _lowercase = batch_size _lowercase = seq_length _lowercase = is_training _lowercase = use_input_mask _lowercase = use_token_type_ids _lowercase = use_labels _lowercase = vocab_size _lowercase = hidden_size _lowercase = num_hidden_layers _lowercase = num_attention_heads _lowercase = intermediate_size _lowercase = hidden_act _lowercase = hidden_dropout_prob _lowercase = attention_probs_dropout_prob _lowercase = max_position_embeddings _lowercase = type_vocab_size _lowercase = type_sequence_label_size _lowercase = initializer_range _lowercase = num_labels _lowercase = num_choices _lowercase = scope _lowercase = projection_dim def __UpperCAmelCase ( self : List[Any] ) -> List[str]: _lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) _lowercase = None if self.use_input_mask: # 
follow test_modeling_tf_ctrl.py _lowercase = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase = None if self.use_token_type_ids: _lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) _lowercase = None _lowercase = None _lowercase = None if self.use_labels: _lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) _lowercase = ids_tensor([self.batch_size] ,self.num_choices ) _lowercase = BertConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__A ,initializer_range=self.initializer_range ,) _lowercase = DPRConfig(projection_dim=self.projection_dim ,**config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self : List[Any] ,__A : Dict ,__A : str ,__A : Any ,__A : Dict ,__A : Tuple ,__A : Tuple ,__A : int ) -> List[str]: _lowercase = TFDPRContextEncoder(config=__A ) _lowercase = model(__A ,attention_mask=__A ,token_type_ids=__A ) _lowercase = model(__A ,token_type_ids=__A ) _lowercase = model(__A ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) ) def __UpperCAmelCase ( self : List[Any] ,__A : Optional[Any] ,__A : Tuple ,__A : Optional[Any] ,__A : Union[str, Any] ,__A : Tuple ,__A : Tuple ,__A : Any ) -> str: _lowercase = TFDPRQuestionEncoder(config=__A ) _lowercase = model(__A ,attention_mask=__A ,token_type_ids=__A ) _lowercase = model(__A ,token_type_ids=__A ) _lowercase = model(__A ) 
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) ) def __UpperCAmelCase ( self : int ,__A : List[str] ,__A : Optional[int] ,__A : List[str] ,__A : str ,__A : str ,__A : int ,__A : int ) -> Optional[Any]: _lowercase = TFDPRReader(config=__A ) _lowercase = model(__A ,attention_mask=__A ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape ,(self.batch_size,) ) def __UpperCAmelCase ( self : str ) -> str: _lowercase = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) = config_and_inputs _lowercase = {'input_ids': input_ids} return config, inputs_dict @require_tf class A_ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {} SCREAMING_SNAKE_CASE_ : List[Any] = False SCREAMING_SNAKE_CASE_ : Dict = False SCREAMING_SNAKE_CASE_ : int = False SCREAMING_SNAKE_CASE_ : List[Any] = False SCREAMING_SNAKE_CASE_ : Optional[int] = False def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: _lowercase = TFDPRModelTester(self ) _lowercase = ConfigTester(self ,config_class=__A ,hidden_size=37 ) def __UpperCAmelCase ( self : Any ) -> List[str]: self.config_tester.run_common_tests() def __UpperCAmelCase ( self : Dict ) -> Optional[int]: _lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*__A ) def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: _lowercase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*__A ) def __UpperCAmelCase ( self : Dict ) -> List[Any]: _lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*__A ) @slow def __UpperCAmelCase ( self : Optional[Any] ) -> str: for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase = TFDPRContextEncoder.from_pretrained(__A ) self.assertIsNotNone(__A ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase = TFDPRContextEncoder.from_pretrained(__A ) self.assertIsNotNone(__A ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase = TFDPRQuestionEncoder.from_pretrained(__A ) self.assertIsNotNone(__A ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase = TFDPRReader.from_pretrained(__A ) self.assertIsNotNone(__A ) @require_tf class A_ ( unittest.TestCase ): """simple docstring""" @slow def __UpperCAmelCase ( self : List[Any] ) -> int: _lowercase = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' ) _lowercase = tf.constant( [[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP] _lowercase = model(__A )[0] # embedding shape = (1, 768) # compare the actual values for a slice. _lowercase = tf.constant( [ [ 0.03236253, 0.12753335, 0.16818509, 0.00279786, 0.3896933, 0.24264945, 0.2178971, -0.02335227, -0.08481959, -0.14324117, ] ] ) self.assertTrue(numpy.allclose(output[:, :10].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
535
0
"""Tests for StableDiffusionLatentUpscalePipeline (fast CPU tests + slow GPU integration)."""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


def check_same_shape(tensor_list):
    """Return True when every tensor in the list has the same shape as the first."""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        """A deterministic 1x4x16x16 latent-like tensor on the test device."""
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        """Build the minimal unet/vae/scheduler/text stack for fast tests."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Standard prompt + dummy latent image inputs for the pipeline call."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    # The tolerance overrides below relax the common-suite thresholds for this pipeline.
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        """All supported Karras-sigma schedulers must produce same-shaped outputs."""
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
626
"""PCA and LDA dimensionality reduction with self-tests."""
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a single column of shape (n, 1)."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Average within-class covariance over `classes` label groups.

    `features` is (n_features, n_samples); `labels` holds class ids 0..classes-1
    per sample (column).
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # First iteration: replace the np.nan placeholder.
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Between-class covariance: class-size-weighted outer products of mean offsets."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # First iteration: replace the np.nan placeholder.
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project `features` (n_features, n_samples) onto its top `dimensions` PCs.

    Raises AssertionError when the dataset is empty.
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Fisher LDA projection onto `dimensions` axes (requires classes > dimensions)."""
    assert classes > dimensions

    # Check if features have been already loaded
    # (original used `features.any` without the call — fixed to actually test the data)
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
626
1
"""simple docstring""" from __future__ import annotations import math def _lowerCamelCase ( UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : Tuple ) -> float: """simple docstring""" A__ = u for i in range(1, __UpperCAmelCase ): A__ = temp * (u - i) return temp def _lowerCamelCase ( ) -> None: """simple docstring""" A__ = int(input("enter the numbers of values: " ) ) A__ = [] for _ in range(__UpperCAmelCase ): y.append([] ) for i in range(__UpperCAmelCase ): for j in range(__UpperCAmelCase ): y[i].append(__UpperCAmelCase ) A__ = 0 print("enter the values of parameters in a list: " ) A__ = list(map(__UpperCAmelCase, input().split() ) ) print("enter the values of corresponding parameters: " ) for i in range(__UpperCAmelCase ): A__ = float(input() ) A__ = int(input("enter the value to interpolate: " ) ) A__ = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1, __UpperCAmelCase ): for j in range(n - i ): A__ = y[j + 1][i - 1] - y[j][i - 1] A__ = y[0][0] for i in range(1, __UpperCAmelCase ): summ += (ucal(__UpperCAmelCase, __UpperCAmelCase ) * y[0][i]) / math.factorial(__UpperCAmelCase ) print(F"""the value at {value} is {summ}""" ) if __name__ == "__main__": main()
713
"""Image processor that resizes images down to multiples of `size_divisor` and rescales pixels."""
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class UpperCamelCase__(BaseImageProcessor):
    # NOTE(review): class name kept as-is for callers; pattern matches the GLPN
    # image processor (size_divisor + 1/255 rescale) — confirm intended model.

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size_divisor=32,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        **kwargs,
    ) -> None:
        """Store defaults; `size_divisor` is the multiple image dims are floored to."""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        """Resize `image` so both dimensions are multiples of `size_divisor` (floored)."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # The module-level `resize` transform is intentionally shadowed by this
        # method name; the global function is still reachable here.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        do_rescale=None,
        size_divisor=None,
        resample=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured resize/rescale steps and pack a BatchFeature."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
562
0
# flake8: noqa
# Lint as: python3
"""Registry mapping format-type names/aliases to Formatter classes."""
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

# format_type -> Formatter subclass
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
# alias -> canonical format_type
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
# alias -> error to raise when the backend isn't installed
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register `formatter_cls` under `format_type` and each of its aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Record that `format_type` (and its aliases) exists but its backend is missing."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias (e.g. 'np') to its canonical format type (e.g. 'numpy')."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for `format_type` (alias-aware).

    Raises the recorded backend error for known-but-unavailable types, and
    ValueError for unknown types.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
95
"""Benchmark helpers: timing decorator and dummy-dataset generation."""
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: calling the wrapped function returns its wall-clock duration (seconds)."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    """Return `num_examples` (index, example-dict) pairs of random data matching `features`.

    `seq_shapes` maps a Sequence column name to the shape to generate for it.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequences down to the inner feature dtype.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write `num_examples` random rows to an Arrow file and load it back as a Dataset.

    Raises ValueError if the writer reports a different row count than requested.
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

    num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
95
1
"""Release utility for the diffusers repo.

Bumps the version string in ``src/diffusers/__init__.py``, ``setup.py`` and
(unless this is a patch release) every example script's ``check_min_version``
pin.  Run without flags before a release, with ``--patch`` for a patch
release, or with ``--post_release`` to move the repo back to a ``.dev0``
version afterwards.
"""
import argparse
import os
import re


PATH_TO_EXAMPLES = "examples/"
# pattern name -> (regex locating the version string, replacement template)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# pattern name -> file that carries the canonical version string
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in *fname* using the REPLACE_PATTERNS entry *pattern*."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the `check_min_version` pin in every maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Skip folders with non-actively-maintained examples.
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere; examples are skipped for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the `main` docs with stable-docs links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Return the current library version as a `packaging.version.Version`."""
    import packaging.version  # lazy: only needed for version arithmetic

    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Compute the release version, confirm it interactively, and apply it."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """After a release, move the repo to the next `.dev0` version."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
128
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    """Agent tool that summarizes an English text with a BART-SamSum checkpoint.

    Implements the PipelineTool contract: `encode` tokenizes the input text,
    `forward` runs seq2seq generation, and `decode` turns the generated ids
    back into a summary string.
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        """Tokenize *text*; long inputs are truncated to the model maximum."""
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        """Generate the summary ids for the tokenized inputs."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Decode generated ids into the final summary string."""
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
128
1
"""Collect and summarize test failures from a GitHub Actions workflow run.

Downloads the run's artifacts, extracts failure lines and failed test names
from the test-report zips, then aggregates the errors per error message and
per model, writing JSON and markdown-table summaries to ``--output_dir``.
"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter


def get_job_links(workflow_run_id, token=None):
    """Return {job name: html url} for every job of a workflow run."""
    import requests  # lazy: only needed when talking to the GitHub API

    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The API pages by 100; fetch the remaining pages.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}


def get_artifacts_links(worflow_run_id, token=None):
    """Return {artifact name: archive download url} for a workflow run."""
    import requests  # lazy: only needed when talking to the GitHub API

    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download one artifact zip to ``output_dir/<artifact_name>.zip``.

    The artifact URL redirects to a signed CDN URL; the redirect must be
    followed manually because the second request must NOT carry the token.
    """
    import requests  # lazy: only needed when talking to the GitHub API

    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract [error line, error, failed test, job link] rows from one report zip."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # Only the three report files are relevant.
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                failed_tests.append(line[len("FAILED ") :])
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test, job link)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result


def get_all_errors(artifact_dir, job_links=None):
    """Aggregate errors from every ``*.zip`` report in *artifact_dir*."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    """Group log rows by error message, most frequent first."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Return the model name for a test path like ``tests/models/<name>/...``, else None."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        return test.split("/")[2]
    return None


def reduce_by_model(logs, error_filter=None):
    """Group log rows by model, with per-model error counts, most errors first."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    """Render the per-error summary as a GitHub-flavored markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    """Render the per-model summary as a GitHub-flavored markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and
    # callee. For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    sa = make_github_table(reduced_by_error)
    sa_per_model = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sa_per_model)
82
"""Tiled Stable Diffusion x4 upscaling.

Splits the input image into overlapping tiles, runs the Stable Diffusion
upscale pipeline on each tile, and blends the 4x results back together with
linear-ramp transparency masks so tile seams are invisible.
"""
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


def make_transparency_mask(size, overlap_pixels, remove_borders=None):
    """Build a uint8 alpha mask: opaque core, linear fade over the overlap border.

    Borders listed in *remove_borders* ("l"/"r"/"t"/"b") keep full opacity so
    image edges are not faded against nothing.
    """
    if remove_borders is None:  # avoid mutable default argument
        remove_borders = []
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    """Clamp *n* into the inclusive range [smallest, largest]."""
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    """Clamp a (x0, y0, x1, y1) rectangle into the given min/max corners."""
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    """Grow a rectangle by *overlap* on all sides, clamped to the image bounds."""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend a strip of the (downscaled) original image to the left of *tile*.

    This gives the upscaler horizontal context beyond the tile boundary.
    """
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    """Drop the context strip added by `squeeze_tile` (4x wider after upscaling)."""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    """Return the largest multiple of *d* that is <= *n*."""
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """Stable Diffusion x4 upscaler that processes the image tile by tile."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale tile (x, y) and blend it into *final_image* in place."""
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Horizontal position of the context strip taken from the downscaled original.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        # Fade only interior borders; image edges keep full opacity.
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        """Upscale *image* 4x tile by tile and return the assembled PIL image.

        *callback*, if given, is invoked after each tile with a dict holding
        the fraction of tiles done ("progress") and the partial result
        ("image").
        """
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
309
0
"""Convert an original fairseq BioGPT checkpoint to the HF Transformers format."""
import argparse
import json
import os
import re
import shutil

import torch


# Indentation used for all JSON files written by this script.
json_indent = 2


class Dictionary:
    """A fairseq-style mapping from symbols to consecutive integer ids."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range ids map to the unknown token, fairseq style.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load a dictionary from a ``<symbol> <count>`` text file (path or file object)."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add *word* to the dictionary (or bump its count) and return its id."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        """Return the line index where the symbol entries start (no header here)."""
        return 0

    def add_from_file(self, f):
        """Load symbols from a file path or open file object into this dictionary."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    """Convert fairseq BPE vocab keys to HF convention.

    (1) remove the word-breaking symbol, (2) add the word-ending symbol where
    the word is not broken up, e.g.:
    d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    """
    da = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert ``checkpoint.pt`` + dicts + bpecodes in *biogpt_checkpoint_path* to HF files."""
    # Lazy transformers imports keep the dictionary/vocab utilities usable standalone.
    from transformers import BioGptConfig, BioGptForCausalLM
    from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
    from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
    from transformers.utils import WEIGHTS_NAME

    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")
    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }
    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]
    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            # NOTE(review): renamed targets were lost in the damaged source; these
            # follow the official transformers conversion script — confirm.
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
12
"""Project Euler problem 7: find the n-th prime number."""
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if *number* is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the *nth* prime number (1-indexed); solution() gives the 10001st."""
    count = 0
    number = 1
    # Handle 2 and 3 first, then only odd candidates need checking.
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
12
1
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


UpperCAmelCase_ = logging.get_logger(__name__)


def rename_key(key):
    """Rewrite ``layers.0``-style key segments as ``layers_0``.

    PyTorch module lists produce dotted numeric segments; Flax flattens the
    same parameters with underscores, so every ``<name>.<digits>`` run is
    joined with ``_``.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key tuple to its Flax counterpart.

    Returns the (possibly renamed) key tuple and the (possibly transposed)
    tensor. ``random_flax_state_dict`` is a flattened dict of freshly
    initialised Flax params, used to decide which rename applies.
    """
    # layer norm: a "bias" that only exists as "scale" on the Flax side
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose the 2-D weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax params dict.

    NOTE(review): in the previous revision all three functions in this
    module were defined under the single name ``A__`` and every local was
    assigned to ``_UpperCAmelCase`` while being read under its real name,
    so this code raised NameError on every path. Names restored here;
    ``A__`` is kept as an alias for the last binding of the old revision.
    """
    # Step 1: move tensors to host numpy arrays
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)


# Backward-compatible alias for the obfuscated public name of the previous
# revision (its final binding was the conversion entry point).
A__ = convert_pytorch_state_dict_to_flax
32
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase_ = logging.get_logger(__name__)

# NOTE(review): the obfuscated revision bound both the logger above and this
# archive map to the same name; the map is the surviving public binding, so
# both assignments are kept in the same order.
UpperCAmelCase_ = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for Falcon models.

    NOTE(review): the previous revision gave every ``__init__`` parameter the
    same name (a SyntaxError) and assigned every attribute to a throwaway
    local instead of ``self``, so no configuration field was ever stored.
    Parameter names and ``self.*`` assignments are restored from the read
    sites visible in the original body.
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        eos_token_id=11,
        bos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        # Per-head dimension of the attention projections.
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        # Rotary position embeddings are used whenever ALiBi is not.
        return not self.alibi
32
1
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved PyTorch state dict, cast every tensor to fp16, and save it.

    NOTE(review): the previous revision iterated an undefined ``state_dict``
    name, ran the isinstance check against the *path string* instead of the
    tensor value, and discarded the halved tensor instead of storing it back.
    All three are fixed here; ``fire.Fire`` also referenced ``convert``,
    which did not exist (the function was named ``SCREAMING_SNAKE_CASE__``).

    :param src_path: path to a saved state dict (e.g. pytorch_model.bin)
    :param map_location: device passed through to ``torch.load``
    :param save_path: output path; defaults to overwriting ``src_path``
    :raises TypeError: if any value in the file is not a tensor
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


# Backward-compatible alias for the obfuscated public name.
SCREAMING_SNAKE_CASE__ = convert

if __name__ == "__main__":
    fire.Fire(convert)
718
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm. The previous revision recursed with an undefined
    second argument; restored to ``(b % a, a)``."""
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    """Hill cipher over the 36-symbol alphabet A-Z plus 0-9.

    NOTE(review): in the previous revision every ``self.*`` assignment and
    class attribute was renamed to a throwaway placeholder while the methods
    still read ``self.key_string`` / ``self.encrypt_key`` / ``self.break_key``,
    so construction and every operation raised at runtime. Names restored.
    """

    # This cipher takes alphanumerics into account, i.e. a total of 36 characters
    key_string = string.ascii_uppercase + string.digits

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """Store the (mod-36-reduced) key matrix after validating it."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a symbol to its index in the 36-symbol alphabet."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a (possibly float) index back to its symbol."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Reject keys whose determinant is not coprime with 36."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Upper-case, strip foreign symbols and pad with the last symbol
        until the length is a multiple of the key order."""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt *text* one key-order-sized batch at a time."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Build the inverse key matrix modulo 36 via the adjugate trick."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        # det * inv(K) is the adjugate of K; scaling by det's modular inverse
        # yields the modular inverse of the key matrix.
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt *text* with the modular inverse of the key matrix."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


# Backward-compatible alias for the obfuscated class name.
A_ = HillCipher


def main() -> None:
    """Interactive driver: read a key matrix, then encrypt or decrypt."""
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
535
0
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class UpperCamelCase(ProcessorMixin):
    """Processor wrapping a LayoutLMv2 image processor and a LayoutXLM tokenizer.

    NOTE(review): the previous revision was not importable — the base class
    name was an undefined placeholder (restored to the imported
    ``ProcessorMixin``), the three class attributes were all bound to one
    mangled name, and ``__init__``/``__call__`` had duplicate parameter
    names (a SyntaxError). Names restored from the read sites in the body.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run OCR (if enabled) via the image processor, then tokenize."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image once per overflow chunk produced from it."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
524
# Pinned/constrained pip requirement specifier for each dependency, keyed by
# the bare package name (the table consumed by the package's setup machinery).
lowerCamelCase__ = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
524
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a_ :Union[str, Any] = logging.get_logger(__name__) a_ :Dict = {"vocab_file": "sentencepiece.bpe.model"} a_ :Union[str, Any] = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", } } a_ :Optional[Any] = { "camembert-base": 512, } a_ :List[str] = "▁" class snake_case__ ( lowerCAmelCase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""] def __init__( self : Any, _snake_case : List[str], _snake_case : Optional[int]="<s>", _snake_case : str="</s>", _snake_case : Optional[int]="</s>", _snake_case : List[str]="<s>", _snake_case : Optional[int]="<unk>", _snake_case : List[Any]="<pad>", _snake_case : str="<mask>", _snake_case : List[Any]=["<s>NOTUSED", "</s>NOTUSED"], _snake_case : Optional[Dict[str, Any]] = None, **_snake_case : List[str], ) ->None: # Mask token behave like a normal word, i.e. 
include the space before it snake_case__ : Optional[Any] = AddedToken(_snake_case, lstrip=_snake_case, rstrip=_snake_case ) if isinstance(_snake_case, _snake_case ) else mask_token snake_case__ : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_snake_case, eos_token=_snake_case, unk_token=_snake_case, sep_token=_snake_case, cls_token=_snake_case, pad_token=_snake_case, mask_token=_snake_case, additional_special_tokens=_snake_case, sp_model_kwargs=self.sp_model_kwargs, **_snake_case, ) snake_case__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_snake_case ) ) snake_case__ : List[Any] = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> snake_case__ : int = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3} snake_case__ : Optional[Any] = len(self.fairseq_tokens_to_ids ) snake_case__ : int = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) snake_case__ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowercase_ ( self : Any, _snake_case : List[int], _snake_case : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ : Dict = [self.cls_token_id] snake_case__ : Optional[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase_ ( self : Tuple, _snake_case : List[int], _snake_case : Optional[List[int]] = None, _snake_case : bool = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case, token_ids_a=_snake_case, already_has_special_tokens=_snake_case ) if token_ids_a is None: return [1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1] def lowercase_ ( self : Dict, _snake_case : 
List[int], _snake_case : Optional[List[int]] = None ) ->List[int]: snake_case__ : Tuple = [self.sep_token_id] snake_case__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase_ ( self : List[str] ) ->List[str]: return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def lowercase_ ( self : str ) ->Union[str, Any]: snake_case__ : Dict = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase_ ( self : Optional[int], _snake_case : str ) ->List[str]: return self.sp_model.encode(_snake_case, out_type=_snake_case ) def lowercase_ ( self : List[Any], _snake_case : Optional[Any] ) ->Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(_snake_case ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(_snake_case ) def lowercase_ ( self : List[Any], _snake_case : int ) ->List[str]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase_ ( self : Optional[Any], _snake_case : List[str] ) ->List[str]: snake_case__ : str = [] snake_case__ : int = '' snake_case__ : Union[str, Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_snake_case ) + token snake_case__ : List[str] = True snake_case__ : List[str] = [] else: current_sub_tokens.append(_snake_case ) snake_case__ : Optional[Any] = False out_string += self.sp_model.decode(_snake_case ) return out_string.strip() def __getstate__( self : Any ) ->int: snake_case__ : str 
= self.__dict__.copy() snake_case__ : List[str] = None return state def __setstate__( self : Optional[Any], _snake_case : List[str] ) ->Any: snake_case__ : Union[str, Any] = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): snake_case__ : Tuple = {} snake_case__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase_ ( self : Optional[Any], _snake_case : str, _snake_case : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_snake_case ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : Optional[int] = os.path.join( _snake_case, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, _snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case, 'wb' ) as fi: snake_case__ : Dict = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,)
243
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = KandinskyInpaintPipeline _SCREAMING_SNAKE_CASE = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] _SCREAMING_SNAKE_CASE = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] _SCREAMING_SNAKE_CASE = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] _SCREAMING_SNAKE_CASE = False @property def lowercase_ ( self : Optional[Any] ) ->Optional[Any]: return 3_2 @property def lowercase_ ( self : int ) ->str: return 3_2 @property def lowercase_ ( self : Any ) ->List[str]: return self.time_input_dim @property def lowercase_ ( self : Optional[Any] ) ->str: return self.time_input_dim * 4 @property def lowercase_ ( self : Tuple ) ->int: return 1_0_0 @property def lowercase_ ( self : str ) ->Dict: snake_case__ : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def lowercase_ ( self : Any ) ->Optional[int]: torch.manual_seed(0 ) snake_case__ : str = MCLIPConfig( 
numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=3_7, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_0_0_5, ) snake_case__ : Optional[Any] = MultilingualCLIP(_snake_case ) snake_case__ : List[Any] = text_encoder.eval() return text_encoder @property def lowercase_ ( self : Tuple ) ->Optional[int]: torch.manual_seed(0 ) snake_case__ : Optional[Any] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } snake_case__ : Dict = UNetaDConditionModel(**_snake_case ) return model @property def lowercase_ ( self : Dict ) ->Optional[int]: return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase_ ( self : Union[str, Any] ) ->List[Any]: torch.manual_seed(0 ) snake_case__ : Optional[int] = VQModel(**self.dummy_movq_kwargs ) return model def lowercase_ ( self : Any ) ->Any: snake_case__ : int = self.dummy_text_encoder snake_case__ : str = self.dummy_tokenizer snake_case__ : Any = self.dummy_unet snake_case__ : Tuple = 
self.dummy_movq snake_case__ : int = DDIMScheduler( num_train_timesteps=1_0_0_0, beta_schedule='linear', beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, clip_sample=_snake_case, set_alpha_to_one=_snake_case, steps_offset=1, prediction_type='epsilon', thresholding=_snake_case, ) snake_case__ : Optional[int] = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowercase_ ( self : str, _snake_case : Any, _snake_case : int=0 ) ->str: snake_case__ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(_snake_case ) ).to(_snake_case ) snake_case__ : str = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1 ) ).to(_snake_case ) # create init_image snake_case__ : Tuple = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(_snake_case ) ).to(_snake_case ) snake_case__ : Optional[Any] = image.cpu().permute(0, 2, 3, 1 )[0] snake_case__ : Tuple = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((2_5_6, 2_5_6) ) # create mask snake_case__ : Any = np.ones((6_4, 6_4), dtype=np.floataa ) snake_case__ : Optional[Any] = 0 if str(_snake_case ).startswith('mps' ): snake_case__ : Union[str, Any] = torch.manual_seed(_snake_case ) else: snake_case__ : Any = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) snake_case__ : int = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 6_4, 'width': 6_4, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def lowercase_ ( self : Optional[int] ) ->Optional[Any]: snake_case__ : int = 'cpu' snake_case__ : str = self.get_dummy_components() snake_case__ : Any = self.pipeline_class(**_snake_case ) snake_case__ : Optional[Any] = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) snake_case__ : Tuple = 
pipe(**self.get_dummy_inputs(_snake_case ) ) snake_case__ : List[Any] = output.images snake_case__ : List[Any] = pipe( **self.get_dummy_inputs(_snake_case ), return_dict=_snake_case, )[0] snake_case__ : Optional[int] = image[0, -3:, -3:, -1] snake_case__ : int = image_from_tuple[0, -3:, -3:, -1] print(F'''image.shape {image.shape}''' ) assert image.shape == (1, 6_4, 6_4, 3) snake_case__ : Any = np.array( [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def lowercase_ ( self : Any ) ->List[Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self : Dict ) ->int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Optional[int] ) ->List[str]: snake_case__ : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' ) snake_case__ : Any = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) snake_case__ : Union[str, Any] = np.ones((7_6_8, 7_6_8), dtype=np.floataa ) snake_case__ : str = 0 snake_case__ : List[str] = 'a hat' snake_case__ : Any = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.floataa ) pipe_prior.to(_snake_case ) snake_case__ : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( 
'kandinsky-community/kandinsky-2-1-inpaint', torch_dtype=torch.floataa ) snake_case__ : Tuple = pipeline.to(_snake_case ) pipeline.set_progress_bar_config(disable=_snake_case ) snake_case__ : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) snake_case__ , snake_case__ : Tuple = pipe_prior( _snake_case, generator=_snake_case, num_inference_steps=5, negative_prompt='', ).to_tuple() snake_case__ : Optional[Any] = pipeline( _snake_case, image=_snake_case, mask_image=_snake_case, image_embeds=_snake_case, negative_image_embeds=_snake_case, generator=_snake_case, num_inference_steps=1_0_0, height=7_6_8, width=7_6_8, output_type='np', ) snake_case__ : Dict = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(_snake_case, _snake_case )
243
1
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    """Multi-node distributed-training smoke test run on SageMaker.

    The class attributes (``framework``, ``script``, ``model_name_or_path``,
    ``instance_type``, ``results``) are injected per variant by
    ``@parameterized_class``; the ``env`` attribute is injected by the
    ``sm_env`` pytest fixture.
    """

    def setUp(self):
        # The GLUE example lives in the examples tree; copy it next to the
        # test payload so SageMaker can upload it as the training entry point.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator for `instance_count` nodes."""
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings: SMDataParallel unless the script drives
        # vanilla PyTorch DDP itself
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics next to the test payload."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator and run training
        estimator = self.create_estimator(instance_count)
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
60
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

# pandas kwargs that have no default (must only be passed when set) or that
# pandas has deprecated (must only be passed when non-default).
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
# kwargs only understood by pandas >= 1.3 / >= 2.0 respectively
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV.

    The fields mirror the parameters of ``pandas.read_csv``; see the pandas
    documentation for their semantics. ``features`` and ``column_names`` are
    datasets-specific additions.
    """

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # `delimiter` and `column_names` are aliases for `sep` and `names`
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """Return the kwargs to forward to ``pandas.read_csv``."""
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    """Arrow-based builder that loads CSV files via pandas."""

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast a pyarrow table to the schema implied by the config features."""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
193
0
import random def a_ ( lowerCAmelCase_ : list, lowerCAmelCase_ : List[Any] ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = [], [], [] for element in data: if element < pivot: less.append(lowerCAmelCase_ ) elif element > pivot: greater.append(lowerCAmelCase_ ) else: equal.append(lowerCAmelCase_ ) return less, equal, greater def a_ ( lowerCAmelCase_ : list, lowerCAmelCase_ : int ): # index = len(items) // 2 when trying to find the median # (value of index when items is sorted) # invalid input if index >= len(lowerCAmelCase_ ) or index < 0: return None __lowerCAmelCase = items[random.randint(0, len(lowerCAmelCase_ ) - 1 )] __lowerCAmelCase = 0 __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = _partition(lowerCAmelCase_, lowerCAmelCase_ ) __lowerCAmelCase = len(lowerCAmelCase_ ) __lowerCAmelCase = len(lowerCAmelCase_ ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(lowerCAmelCase_, lowerCAmelCase_ ) # must be in larger else: return quick_select(lowerCAmelCase_, index - (m + count) )
700
_snake_case : List[Any] = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] _snake_case : List[Any] = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] _snake_case : Dict = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] _snake_case : Any = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] _snake_case : Tuple = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 
263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] _snake_case : Optional[int] = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] _snake_case : int = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] _snake_case : Optional[Any] = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
421
0
"""Protein data type and PDB serialization helpers (OpenFold port)."""

import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants


FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

# ProteinNet coordinates are in picometers; PDB uses angstroms.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms, indexed per atom type.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parse a ProteinNet-format record into a `Protein`.

    Only the [PRIMARY], [TERTIARY] and [MASK] sections are read; the backbone
    atoms N, CA, C are populated.
    """
    tag_re = r"(\[[A-Z]+\]\n)"
    tags: List[str] = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            # picometers -> angstroms
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    """Build the REMARK/PARENT header lines for one chain of `prot`."""
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # keep only the parents belonging to the requested chain
        parents = [p for p, i in zip(parents, parents_chain_index) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain
    recycling.
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string.

    Args:
      prot: The protein to convert to PDB.

    Returns:
      PDB string.
    """
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the residue types alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a `Protein` from a model prediction and its input features."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
135
"""Nightly GPU test for the legacy ONNX Stable Diffusion inpainting pipeline."""

import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        # CUDA execution provider with a capped arena so the test fits on a
        # 16GB card.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_inpaint_legacy(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
135
1
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) with the forward (explicit) Euler method.

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        y0: initial value y(x0).
        x0: initial abscissa.
        step_size: fixed integration step h.
        x_end: final abscissa (integration stops at or just past it).

    Returns:
        Array of the n + 1 successive approximations y_0 .. y_n, where
        n = ceil((x_end - x0) / step_size).
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # y_{k+1} = y_k + h * f(x_k, y_k)
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
721
"""Smoke tests for the ``digital_image_processing`` package.

Each test exercises one filter/transform on the bundled Lena sample images
and only asserts that the output is non-trivial (any()/all()).
"""

import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared fixtures: the small Lena image in BGR and grayscale.
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # Laplace diagonal kernel
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
452
0
"""Project Euler problem 800: count hybrid integers p^q * q^p (p, q distinct
primes) that do not exceed base^degree."""

from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list:
    """Return all primes strictly below ``max_number`` (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p^q * q^p <= base^degree for distinct primes p < q.

    Works in log space: p^q * q^p <= base^degree
    iff q*log2(p) + p*log2(q) <= degree*log2(base). Since this bound only
    needs primes up to degree*log2(base), a two-pointer sweep over the sieved
    primes counts all valid pairs in O(#primes).
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # shrink the right end until the pair (left, right) satisfies the bound
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        # every q between left+1 and right pairs validly with prime_numbers[left]
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
688
"""Minimal ANSI terminal helpers for interactive cursor movement."""

import enum
import shutil
import sys


# Current terminal width (height is unused here).
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

# Map of direction names to the ANSI CSI final byte for cursor movement.
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """Write ``content`` (plus ``end``) to stdout and flush immediately."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write ``content`` wrapped in the ANSI color code ``color``."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    """Return the cursor to the start of the current line."""
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    """Move the cursor ``num_lines`` in ``direction`` (up/down/left/right)."""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    """Blank out the current line and return the cursor to its start."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a horizontal rule across the terminal width."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
688
1
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters lowercase_ = logging.get_logger(__name__) def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ): # Recurse if needed if "." in tensor_name: __lowerCamelCase : List[Any] = tensor_name.split('.' ) for split in splits[:-1]: __lowerCamelCase : Dict = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if new_module is None: raise ValueError(f'{module} has no attribute {split}.' ) __lowerCamelCase : str = new_module __lowerCamelCase : Any = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' ) __lowerCamelCase : int = tensor_name in module._buffers __lowerCamelCase : str = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' 
) __lowerCamelCase : int = False __lowerCamelCase : Optional[Any] = False if is_buffer or not is_bitsandbytes_available(): __lowerCamelCase : int = False __lowerCamelCase : List[str] = False else: __lowerCamelCase : Optional[int] = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) __lowerCamelCase : Optional[Any] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: __lowerCamelCase : Union[str, Any] = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: __lowerCamelCase : List[str] = old_value.to(SCREAMING_SNAKE_CASE__ ) elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ): __lowerCamelCase : Dict = value.to('cpu' ) if value.dtype == torch.inta: __lowerCamelCase : Optional[Any] = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: __lowerCamelCase : Union[str, Any] = torch.tensor(SCREAMING_SNAKE_CASE__ , device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , SCREAMING_SNAKE_CASE__ ) and fpaa_statistics is None: __lowerCamelCase : Optional[Any] = new_value.T __lowerCamelCase : Optional[int] = old_value.__dict__ if is_abit: __lowerCamelCase : Optional[Any] = bnb.nn.IntaParams(SCREAMING_SNAKE_CASE__ , requires_grad=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) elif is_abit: __lowerCamelCase : Optional[int] = bnb.nn.Paramsabit(SCREAMING_SNAKE_CASE__ , requires_grad=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : Optional[int] = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(SCREAMING_SNAKE_CASE__ ) ) else: if value is None: __lowerCamelCase : Tuple = old_value.to(SCREAMING_SNAKE_CASE__ ) elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ): __lowerCamelCase : List[Any] = value.to(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase : Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ ) if is_buffer: __lowerCamelCase : List[str] = new_value else: __lowerCamelCase : Tuple = nn.Parameter(SCREAMING_SNAKE_CASE__ , requires_grad=old_value.requires_grad ) __lowerCamelCase : Optional[Any] = new_value def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False ): for name, module in model.named_children(): if current_key_name is None: __lowerCamelCase : Union[str, Any] = [] current_key_name.append(SCREAMING_SNAKE_CASE__ ) if (isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) or isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(SCREAMING_SNAKE_CASE__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase , 
__lowerCamelCase : Optional[Any] = module.weight.shape else: __lowerCamelCase : Dict = module.in_features __lowerCamelCase : Any = module.out_features if quantization_config.quantization_method() == "llm_int8": __lowerCamelCase : Union[str, Any] = bnb.nn.LinearabitLt( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) __lowerCamelCase : Tuple = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: __lowerCamelCase : List[Any] = bnb.nn.Linearabit( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) __lowerCamelCase : Union[str, Any] = True # Store the module class in case we need to transpose the weight later __lowerCamelCase : Any = type(SCREAMING_SNAKE_CASE__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(SCREAMING_SNAKE_CASE__ ) if len(list(module.children() ) ) > 0: __lowerCamelCase , __lowerCamelCase : Optional[int] = _replace_with_bnb_linear( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_been_replaced=SCREAMING_SNAKE_CASE__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ): __lowerCamelCase : Optional[int] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert __lowerCamelCase , __lowerCamelCase : Any = _replace_with_bnb_linear( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not 
has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def UpperCamelCase__ ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , SCREAMING_SNAKE_CASE__ , ) return replace_with_bnb_linear(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def UpperCamelCase__ ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , SCREAMING_SNAKE_CASE__ , ) return set_module_quantized_tensor_to_device(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : Optional[Any] = deepcopy(SCREAMING_SNAKE_CASE__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() __lowerCamelCase : Any = find_tied_parameters(SCREAMING_SNAKE_CASE__ ) # For compatibility with Accelerate < 0.18 if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : Optional[Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: __lowerCamelCase : Any = sum(SCREAMING_SNAKE_CASE__ , [] ) __lowerCamelCase : int = len(SCREAMING_SNAKE_CASE__ ) > 0 # Check if it is a base model __lowerCamelCase : Optional[int] = not hasattr(SCREAMING_SNAKE_CASE__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head __lowerCamelCase : Any = list(model.named_children() ) __lowerCamelCase : int = [list_modules[-1][0]] # add last module together with tied weights __lowerCamelCase : Any = set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : List[Any] = list(set(SCREAMING_SNAKE_CASE__ ) ) + list(SCREAMING_SNAKE_CASE__ ) # remove ".weight" from the keys __lowerCamelCase : Dict = ['.weight', '.bias'] __lowerCamelCase : int = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: __lowerCamelCase : Optional[int] = name.replace(SCREAMING_SNAKE_CASE__ , '' ) filtered_module_names.append(SCREAMING_SNAKE_CASE__ ) return filtered_module_names
230
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class A_ ( __UpperCamelCase ): '''simple docstring''' __snake_case = 42 class A_ ( nn.Module ): '''simple docstring''' def __init__( self: List[Any] , a: Optional[Any]=3 , a: Tuple=3 , a: str=("DownEncoderBlock2D",) , a: str=(64,) , a: Optional[int]=2 , a: int=32 , a: str="silu" , a: Optional[Any]=True , ): super().__init__() __lowerCamelCase : int = layers_per_block __lowerCamelCase : List[Any] = torch.nn.Convad( a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) __lowerCamelCase : Tuple = None __lowerCamelCase : Dict = nn.ModuleList([] ) # down __lowerCamelCase : Optional[int] = block_out_channels[0] for i, down_block_type in enumerate(a ): __lowerCamelCase : str = output_channel __lowerCamelCase : Optional[int] = block_out_channels[i] __lowerCamelCase : Dict = i == len(a ) - 1 __lowerCamelCase : List[Any] = get_down_block( a , num_layers=self.layers_per_block , in_channels=a , out_channels=a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , ) self.down_blocks.append(a ) # mid __lowerCamelCase : Optional[Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , ) # out __lowerCamelCase : str = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=a , eps=1e-6 ) __lowerCamelCase : Optional[Any] = nn.SiLU() __lowerCamelCase : int = 2 * out_channels if double_z else out_channels __lowerCamelCase : Tuple = nn.Convad(block_out_channels[-1] , a , 3 , padding=1 ) 
__lowerCamelCase : List[Any] = False def _snake_case ( self: List[str] , a: List[Any] ): __lowerCamelCase : List[str] = x __lowerCamelCase : Dict = self.conv_in(a ) if self.training and self.gradient_checkpointing: def create_custom_forward(a: int ): def custom_forward(*a: Optional[Any] ): return module(*a ) return custom_forward # down if is_torch_version('>=' , '1.11.0' ): for down_block in self.down_blocks: __lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(a ) , a , use_reentrant=a ) # middle __lowerCamelCase : Any = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , a , use_reentrant=a ) else: for down_block in self.down_blocks: __lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a ) # middle __lowerCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , a ) else: # down for down_block in self.down_blocks: __lowerCamelCase : List[Any] = down_block(a ) # middle __lowerCamelCase : Union[str, Any] = self.mid_block(a ) # post-process __lowerCamelCase : Tuple = self.conv_norm_out(a ) __lowerCamelCase : List[str] = self.conv_act(a ) __lowerCamelCase : int = self.conv_out(a ) return sample class A_ ( nn.Module ): '''simple docstring''' def __init__( self: int , a: List[str]=3 , a: Tuple=3 , a: str=("UpDecoderBlock2D",) , a: Union[str, Any]=(64,) , a: Optional[Any]=2 , a: Optional[Any]=32 , a: str="silu" , a: Union[str, Any]="group" , ): super().__init__() __lowerCamelCase : List[Any] = layers_per_block __lowerCamelCase : Any = nn.Convad( a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) __lowerCamelCase : Tuple = None __lowerCamelCase : int = nn.ModuleList([] ) __lowerCamelCase : Optional[Any] = in_channels if norm_type == 'spatial' else None # mid __lowerCamelCase : List[Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , 
resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , ) # up __lowerCamelCase : Any = list(reversed(a ) ) __lowerCamelCase : Dict = reversed_block_out_channels[0] for i, up_block_type in enumerate(a ): __lowerCamelCase : List[Any] = output_channel __lowerCamelCase : List[str] = reversed_block_out_channels[i] __lowerCamelCase : Optional[Any] = i == len(a ) - 1 __lowerCamelCase : Optional[Any] = get_up_block( a , num_layers=self.layers_per_block + 1 , in_channels=a , out_channels=a , prev_output_channel=a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , resnet_time_scale_shift=a , ) self.up_blocks.append(a ) __lowerCamelCase : List[str] = output_channel # out if norm_type == "spatial": __lowerCamelCase : int = SpatialNorm(block_out_channels[0] , a ) else: __lowerCamelCase : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=a , eps=1e-6 ) __lowerCamelCase : Union[str, Any] = nn.SiLU() __lowerCamelCase : List[Any] = nn.Convad(block_out_channels[0] , a , 3 , padding=1 ) __lowerCamelCase : List[str] = False def _snake_case ( self: Optional[int] , a: Tuple , a: List[str]=None ): __lowerCamelCase : List[str] = z __lowerCamelCase : Union[str, Any] = self.conv_in(a ) __lowerCamelCase : List[str] = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(a: Any ): def custom_forward(*a: str ): return module(*a ) return custom_forward if is_torch_version('>=' , '1.11.0' ): # middle __lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , a , a , use_reentrant=a ) __lowerCamelCase : str = sample.to(a ) # up for up_block in self.up_blocks: __lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(a ) , a , a , 
use_reentrant=a ) else: # middle __lowerCamelCase : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , a , a ) __lowerCamelCase : int = sample.to(a ) # up for up_block in self.up_blocks: __lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a , a ) else: # middle __lowerCamelCase : int = self.mid_block(a , a ) __lowerCamelCase : List[str] = sample.to(a ) # up for up_block in self.up_blocks: __lowerCamelCase : List[str] = up_block(a , a ) # post-process if latent_embeds is None: __lowerCamelCase : Optional[int] = self.conv_norm_out(a ) else: __lowerCamelCase : Dict = self.conv_norm_out(a , a ) __lowerCamelCase : Any = self.conv_act(a ) __lowerCamelCase : str = self.conv_out(a ) return sample class A_ ( nn.Module ): '''simple docstring''' def __init__( self: Optional[int] , a: List[Any] , a: List[Any] , a: List[Any] , a: Tuple=None , a: Tuple="random" , a: List[Any]=False , a: List[str]=True ): super().__init__() __lowerCamelCase : Optional[Any] = n_e __lowerCamelCase : Optional[int] = vq_embed_dim __lowerCamelCase : Tuple = beta __lowerCamelCase : List[str] = legacy __lowerCamelCase : str = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) __lowerCamelCase : str = remap if self.remap is not None: self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) ) __lowerCamelCase : Dict = self.used.shape[0] __lowerCamelCase : Optional[Any] = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": __lowerCamelCase : Any = self.re_embed __lowerCamelCase : Optional[int] = self.re_embed + 1 print( F'Remapping {self.n_e} indices to {self.re_embed} indices. ' F'Using {self.unknown_index} for unknown indices.' 
) else: __lowerCamelCase : int = n_e __lowerCamelCase : Optional[Any] = sane_index_shape def _snake_case ( self: Tuple , a: Union[str, Any] ): __lowerCamelCase : Optional[Any] = inds.shape assert len(a ) > 1 __lowerCamelCase : List[Any] = inds.reshape(ishape[0] , -1 ) __lowerCamelCase : Any = self.used.to(a ) __lowerCamelCase : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() __lowerCamelCase : Dict = match.argmax(-1 ) __lowerCamelCase : List[Any] = match.sum(2 ) < 1 if self.unknown_index == "random": __lowerCamelCase : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: __lowerCamelCase : str = self.unknown_index return new.reshape(a ) def _snake_case ( self: Tuple , a: Optional[int] ): __lowerCamelCase : List[Any] = inds.shape assert len(a ) > 1 __lowerCamelCase : Optional[int] = inds.reshape(ishape[0] , -1 ) __lowerCamelCase : Union[str, Any] = self.used.to(a ) if self.re_embed > self.used.shape[0]: # extra token __lowerCamelCase : Optional[Any] = 0 # simply set to zero __lowerCamelCase : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , a ) return back.reshape(a ) def _snake_case ( self: int , a: List[str] ): # reshape z -> (batch, height, width, channel) and flatten __lowerCamelCase : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous() __lowerCamelCase : List[Any] = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z __lowerCamelCase : int = torch.argmin(torch.cdist(a , self.embedding.weight ) , dim=1 ) __lowerCamelCase : str = self.embedding(a ).view(z.shape ) __lowerCamelCase : str = None __lowerCamelCase : Any = None # compute loss for embedding if not self.legacy: __lowerCamelCase : int = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: __lowerCamelCase : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve 
gradients __lowerCamelCase : int = z + (z_q - z).detach() # reshape back to match original input shape __lowerCamelCase : Any = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: __lowerCamelCase : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis __lowerCamelCase : Optional[Any] = self.remap_to_used(a ) __lowerCamelCase : Dict = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: __lowerCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def _snake_case ( self: Tuple , a: Optional[int] , a: Any ): # shape specifying (batch, height, width, channel) if self.remap is not None: __lowerCamelCase : Any = indices.reshape(shape[0] , -1 ) # add batch axis __lowerCamelCase : Any = self.unmap_to_all(a ) __lowerCamelCase : int = indices.reshape(-1 ) # flatten again # get quantized latent vectors __lowerCamelCase : str = self.embedding(a ) if shape is not None: __lowerCamelCase : str = z_q.view(a ) # reshape back to match original input shape __lowerCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: str , a: Dict , a: Any=False ): __lowerCamelCase : Tuple = parameters __lowerCamelCase , __lowerCamelCase : Any = torch.chunk(a , 2 , dim=1 ) __lowerCamelCase : List[str] = torch.clamp(self.logvar , -3_0.0 , 2_0.0 ) __lowerCamelCase : int = deterministic __lowerCamelCase : Dict = torch.exp(0.5 * self.logvar ) __lowerCamelCase : str = torch.exp(self.logvar ) if self.deterministic: __lowerCamelCase : Optional[Any] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def _snake_case ( self: Union[str, Any] , a: Optional[torch.Generator] = None ): # make sure sample is on the same device as the parameters and has same dtype __lowerCamelCase : Union[str, Any] = 
randn_tensor( self.mean.shape , generator=a , device=self.parameters.device , dtype=self.parameters.dtype ) __lowerCamelCase : str = self.mean + self.std * sample return x def _snake_case ( self: List[str] , a: Union[str, Any]=None ): if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def _snake_case ( self: Optional[Any] , a: str , a: Any=[1, 2, 3] ): if self.deterministic: return torch.Tensor([0.0] ) __lowerCamelCase : int = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=a ) def _snake_case ( self: Optional[int] ): return self.mean
230
1
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets _A : List[Any] = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ _A : Dict = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """ _A : Dict = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. 
Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """ def __snake_case ( lowerCAmelCase_ ) -> List[str]: def remove_articles(lowerCAmelCase_ ): SCREAMING_SNAKE_CASE__ = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(lowerCAmelCase_ , ''' ''' , lowerCAmelCase_ ) def white_space_fix(lowerCAmelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCAmelCase_ ): SCREAMING_SNAKE_CASE__ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCAmelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) ) def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) ) def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int: SCREAMING_SNAKE_CASE__ = [any(compute_exact(lowerCAmelCase_ , lowerCAmelCase_ ) for ref in refs ) for pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ )] return (sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )) * 1_0_0 def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: SCREAMING_SNAKE_CASE__ = [rgram for rgrams in rgramslist for rgram in rgrams] SCREAMING_SNAKE_CASE__ = Counter(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ = Counter(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ = Counter() for sgram, scount in sgramcounter.items(): SCREAMING_SNAKE_CASE__ = scount * numref SCREAMING_SNAKE_CASE__ = Counter(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ = Counter() for cgram, 
ccount in cgramcounter.items(): SCREAMING_SNAKE_CASE__ = ccount * numref # KEEP SCREAMING_SNAKE_CASE__ = sgramcounter_rep & cgramcounter_rep SCREAMING_SNAKE_CASE__ = keepgramcounter_rep & rgramcounter SCREAMING_SNAKE_CASE__ = sgramcounter_rep & rgramcounter SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. SCREAMING_SNAKE_CASE__ = 1 SCREAMING_SNAKE_CASE__ = 1 if len(lowerCAmelCase_ ) > 0: SCREAMING_SNAKE_CASE__ = keeptmpscorea / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) SCREAMING_SNAKE_CASE__ = keeptmpscorea / sum(keepgramcounterall_rep.values() ) SCREAMING_SNAKE_CASE__ = 0 if keepscore_precision > 0 or keepscore_recall > 0: SCREAMING_SNAKE_CASE__ = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION SCREAMING_SNAKE_CASE__ = sgramcounter_rep - cgramcounter_rep SCREAMING_SNAKE_CASE__ = delgramcounter_rep - rgramcounter SCREAMING_SNAKE_CASE__ = sgramcounter_rep - rgramcounter SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
SCREAMING_SNAKE_CASE__ = 1 if len(lowerCAmelCase_ ) > 0: SCREAMING_SNAKE_CASE__ = deltmpscorea / len(lowerCAmelCase_ ) # ADDITION SCREAMING_SNAKE_CASE__ = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ = set(lowerCAmelCase_ ) & set(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. SCREAMING_SNAKE_CASE__ = 1 SCREAMING_SNAKE_CASE__ = 1 if len(lowerCAmelCase_ ) > 0: SCREAMING_SNAKE_CASE__ = addtmpscore / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: SCREAMING_SNAKE_CASE__ = addtmpscore / len(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ = 0 if addscore_precision > 0 or addscore_recall > 0: SCREAMING_SNAKE_CASE__ = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: SCREAMING_SNAKE_CASE__ = len(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ = ssent.split(''' ''' ) SCREAMING_SNAKE_CASE__ = csent.split(''' ''' ) SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] for rsent in rsents: SCREAMING_SNAKE_CASE__ = rsent.split(''' ''' ) SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: SCREAMING_SNAKE_CASE__ = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: SCREAMING_SNAKE_CASE__ = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] 
ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: SCREAMING_SNAKE_CASE__ = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: SCREAMING_SNAKE_CASE__ = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: SCREAMING_SNAKE_CASE__ = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: SCREAMING_SNAKE_CASE__ = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: SCREAMING_SNAKE_CASE__ = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: SCREAMING_SNAKE_CASE__ = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: SCREAMING_SNAKE_CASE__ = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(lowerCAmelCase_ ) ((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , 
lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 SCREAMING_SNAKE_CASE__ = sum([delascore, delascore, delascore, delascore] ) / 4 SCREAMING_SNAKE_CASE__ = sum([addascore, addascore, addascore, addascore] ) / 4 SCREAMING_SNAKE_CASE__ = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = "13a" , lowerCAmelCase_ = True ) -> Optional[Any]: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: SCREAMING_SNAKE_CASE__ = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: SCREAMING_SNAKE_CASE__ = sacrebleu.metrics.bleu._get_tokenizer(lowerCAmelCase_ )()(lowerCAmelCase_ ) else: SCREAMING_SNAKE_CASE__ = sacrebleu.TOKENIZERS[tokenizer]()(lowerCAmelCase_ ) elif tokenizer == "moses": SCREAMING_SNAKE_CASE__ = sacremoses.MosesTokenizer().tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ , escape=lowerCAmelCase_ ) elif tokenizer == "penn": SCREAMING_SNAKE_CASE__ = sacremoses.MosesTokenizer().penn_tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ ) else: SCREAMING_SNAKE_CASE__ = sentence if not return_str: SCREAMING_SNAKE_CASE__ = normalized_sent.split() return normalized_sent def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: if not (len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )): raise ValueError('''Sources length must match predictions and references lengths.''' ) SCREAMING_SNAKE_CASE__ = 
0 for src, pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): sari_score += SARIsent(normalize(lowerCAmelCase_ ) , normalize(lowerCAmelCase_ ) , [normalize(lowerCAmelCase_ ) for sent in refs] ) SCREAMING_SNAKE_CASE__ = sari_score / len(lowerCAmelCase_ ) return 1_0_0 * sari_score def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="exp" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Any: SCREAMING_SNAKE_CASE__ = len(references[0] ) if any(len(lowerCAmelCase_ ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) SCREAMING_SNAKE_CASE__ = [[refs[i] for refs in references] for i in range(lowerCAmelCase_ )] SCREAMING_SNAKE_CASE__ = sacrebleu.corpus_bleu( lowerCAmelCase_ , lowerCAmelCase_ , smooth_method=lowerCAmelCase_ , smooth_value=lowerCAmelCase_ , force=lowerCAmelCase_ , lowercase=lowerCAmelCase_ , use_effective_order=lowerCAmelCase_ , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case ( datasets.Metric ): '''simple docstring''' def lowercase_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', 
'''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def lowercase_ ( self , A_ , A_ , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = {} result.update({'''sari''': compute_sari(sources=A_ , predictions=A_ , references=A_ )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=A_ , references=A_ )} ) result.update({'''exact''': compute_em(predictions=A_ , references=A_ )} ) return result
100
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__( unittest.TestCase ): def a__( self : List[str] )-> Optional[Any]: """simple docstring""" UpperCAmelCase = inspect.getfile(accelerate.test_utils ) UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) UpperCAmelCase = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def a__( self : Optional[Any] )-> str: """simple docstring""" print(F"""Found {torch.cuda.device_count()} devices.""" ) UpperCAmelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() ) @require_multi_gpu def a__( self : str )-> int: """simple docstring""" print(F"""Found {torch.cuda.device_count()} devices.""" ) UpperCAmelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(F"""Command: {cmd}""" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() ) @require_multi_gpu def a__( self : Dict )-> List[Any]: """simple docstring""" UpperCAmelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() ) @require_multi_gpu def a__( self : Dict )-> Union[str, Any]: """simple docstring""" print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" ) UpperCAmelCase = 
['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ): execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": _lowercase : List[Any] = Accelerator() _lowercase : Union[str, Any] = (accelerator.state.process_index + 2, 10) _lowercase : List[Any] = torch.randint(0, 10, shape).to(accelerator.device) _lowercase : Any = """""" _lowercase : Optional[int] = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." _lowercase : str = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." _lowercase : Dict = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
210
0
from __future__ import annotations import math from collections.abc import Callable def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case = 100 , ): lowerCAmelCase = x_start lowerCAmelCase = fnc(_snake_case ) lowerCAmelCase = 0.0 for _ in range(_snake_case ): # Approximates curve as a sequence of linear lines and sums their length lowerCAmelCase = (x_end - x_start) / steps + xa lowerCAmelCase = fnc(_snake_case ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step lowerCAmelCase = xa lowerCAmelCase = fxa return length if __name__ == "__main__": def UpperCAmelCase ( _snake_case ): return math.sin(10 * x ) print("""f(x) = sin(10 * x)""") print("""The length of the curve from x = -10 to x = 10 is:""") UpperCAmelCase_ =10 while i <= 10_0000: print(F'''With {i} steps: {line_length(f, -10, 10, i)}''') i *= 10
33
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any =BertJapaneseTokenizer __a : Optional[int] =False __a : int =True def __snake_case ( self ): super().setUp() lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) 
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', 
'''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer( do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) 
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def 
__snake_case ( self ): lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = 
JumanppTokenizer(normalize_text=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) lowerCAmelCase = tokenizer.subword_tokenizer lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] 
# NOTE(review): the opening of this test method lies before this chunk; only its
# tail (below, down to the closing parenthesis) is visible here. Local names were
# machine-mangled (`lowerCAmelCase`), so later references like `tokenizer`,
# `text`, `text_a`, `vocab_tokens` point at assignments whose names were lost.
        )
        lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
        self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )

    def __snake_case ( self ):
        # Checks [CLS]/[SEP] placement for single sentences and sentence pairs.
        lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
        lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]


@custom_tokenizers
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
    """Tests for BertJapaneseTokenizer with the character-level subword tokenizer."""

    __a : Union[str, Any] = BertJapaneseTokenizer
    __a : Optional[int] = False

    def __snake_case ( self ):
        # Write a tiny character-level vocab file used by the tests below.
        super().setUp()
        lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def __snake_case ( self , **UpperCAmelCase_ ):
        # Tokenizer factory: always uses the character subword tokenizer.
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ )

    def __snake_case ( self , UpperCAmelCase_ ):
        # Input/expected-output pair for the shared tokenizer test harness.
        lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
        lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
        return input_text, output_text

    def __snake_case ( self ):
        pass  # TODO add if relevant

    def __snake_case ( self ):
        pass  # TODO add if relevant

    def __snake_case ( self ):
        pass  # TODO add if relevant

    def __snake_case ( self ):
        # Full tokenizer: character split plus token-id conversion.
        lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
        lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
        self.assertListEqual(
            UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )

    def __snake_case ( self ):
        # CharacterTokenizer in isolation: unknown characters map to [UNK].
        lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        lowerCAmelCase = {}
        for i, token in enumerate(UpperCAmelCase_ ):
            lowerCAmelCase = i
        lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
        self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )

    def __snake_case ( self ):
        # Checks [CLS]/[SEP] placement for the character-level checkpoint.
        lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
        lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
        lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]


@custom_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
    """Checks that AutoTokenizer resolves the Japanese checkpoint to this tokenizer class."""

    def __snake_case ( self ):
        lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
        lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
        self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )


class __UpperCamelCase ( unittest.TestCase ):
    """Checks the warning emitted when a checkpoint is loaded via a mismatched tokenizer class."""

    def __snake_case ( self ):
        lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
        with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
            BertTokenizer.from_pretrained(UpperCAmelCase_ )
        self.assertTrue(
            cm.records[0].message.startswith(
                '''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
                ''' is called from.''' ) )
        lowerCAmelCase = '''bert-base-cased'''
        with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
            BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ )
        self.assertTrue(
            cm.records[0].message.startswith(
                '''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
                ''' is called from.''' ) )
33
1
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class _lowerCamelCase ( UpperCamelCase ): """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "arrow" , **_SCREAMING_SNAKE_CASE , )->Any: '''simple docstring''' super().__init__( split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) A_ : Optional[Any] = load_from_cache_file A_ : Any = file_format A_ : List[Any] = Spark( df=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , working_dir=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) def _snake_case ( self )->List[str]: '''simple docstring''' if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) A_ : Tuple = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=_SCREAMING_SNAKE_CASE , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
590
import argparse
import shlex

import runhouse as rh

if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    UpperCamelCase = argparse.ArgumentParser()
    # NOTE(review): mangling renamed the locals; `parser`, `args`, `unknown`,
    # `cluster` and `example_dir` below presumably refer to the objects assigned
    # to the mangled names above/beside them.
    parser.add_argument("""--user""", type=str, default="""ubuntu""")
    parser.add_argument("""--host""", type=str, default="""localhost""")
    parser.add_argument("""--key_path""", type=str, default=None)
    parser.add_argument("""--instance""", type=str, default="""V100:1""")
    parser.add_argument("""--provider""", type=str, default="""cheapest""")
    parser.add_argument("""--use_spot""", type=bool, default=False)
    parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
    UpperCamelCase , UpperCamelCase = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
        UpperCamelCase = rh.cluster(
            name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
        )
    else:
        UpperCamelCase = rh.cluster(
            name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    UpperCamelCase = args.example.rsplit("""/""", 1)[0]
    # Set up remote environment
    cluster.install_packages(["""pip:./"""])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
    cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
590
1
"""MobileNetV2 model configuration and its ONNX export config."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


a = logging.get_logger(__name__)

# Checkpoint name -> config URL for the released MobileNetV2 variants.
a = {
    'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
    'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
    'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
    'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class SCREAMING_SNAKE_CASE__ ( _a ):
    """Configuration class storing MobileNetV2 architecture hyper-parameters.

    NOTE(review): name mangling collapsed every ``__init__`` parameter to one
    duplicated identifier (a SyntaxError as written) and dropped the
    ``self.<name>`` targets; the keyword names referenced in the body
    (``num_channels``, ``depth_multiplier``, ...) are the original signature.
    """

    # model_type identifier used by AutoConfig dispatch.
    _a = 'mobilenet_v2'

    def __init__( self : Optional[Any] , lowerCAmelCase : Dict=3 , lowerCAmelCase : Union[str, Any]=224 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : str=8 , lowerCAmelCase : Tuple=6 , lowerCAmelCase : Dict=32 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : List[str]="relu6" , lowerCAmelCase : List[str]=True , lowerCAmelCase : Dict=0.8 , lowerCAmelCase : Dict=0.02 , lowerCAmelCase : Any=0.001 , lowerCAmelCase : Tuple=255 , **lowerCAmelCase : Optional[Any] , ):
        '''Validate and store the MobileNetV2 hyper-parameters.'''
        super().__init__(**lowerCAmelCase )
        # A non-positive width multiplier would produce empty layers.
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )
        lowerCAmelCase = num_channels
        lowerCAmelCase = image_size
        lowerCAmelCase = depth_multiplier
        lowerCAmelCase = depth_divisible_by
        lowerCAmelCase = min_depth
        lowerCAmelCase = expand_ratio
        lowerCAmelCase = output_stride
        lowerCAmelCase = first_layer_is_expansion
        lowerCAmelCase = finegrained_output
        lowerCAmelCase = hidden_act
        lowerCAmelCase = tf_padding
        lowerCAmelCase = classifier_dropout_prob
        lowerCAmelCase = initializer_range
        lowerCAmelCase = layer_norm_eps
        lowerCAmelCase = semantic_loss_ignore_index


class SCREAMING_SNAKE_CASE__ ( _a ):
    """ONNX export configuration for MobileNetV2."""

    # Minimum ONNX opset / torch version supported by the export.
    _a = version.parse('1.11' )

    @property
    def __lowercase ( self : Any ):
        # Single pixel-values input with a dynamic batch axis.
        return OrderedDict([("""pixel_values""", {0: """batch"""})] )

    @property
    def __lowercase ( self : Any ):
        # Output signature depends on the task head.
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})] )
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )

    @property
    def __lowercase ( self : Tuple ):
        # Tolerance used when validating exported-model outputs.
        return 1e-4
529
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = { 'configuration_time_series_transformer': [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimeSeriesTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimeSeriesTransformerForPrediction', 'TimeSeriesTransformerModel', 'TimeSeriesTransformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
529
1
"""Processor class for FLAVA: wraps an image processor and a BERT tokenizer."""
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __A ( SCREAMING_SNAKE_CASE_ ):
    """Combines FlavaImageProcessor and BertTokenizer(Fast) behind one callable API.

    NOTE(review): name mangling collapsed the parameter names to duplicated
    identifiers (a SyntaxError as written); the keyword names referenced in the
    bodies (``image_processor``, ``tokenizer``, ``text``, ``images``, ...) are
    the original signature.
    """

    _UpperCamelCase : Any = ["image_processor", "tokenizer"]
    _UpperCamelCase : Optional[int] = "FlavaImageProcessor"
    _UpperCamelCase : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , a__=None , a__=None , **a__ ):
        # Back-compat: accept the deprecated `feature_extractor` kwarg as image processor.
        _lowerCAmelCase : Union[str, Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , a__ , )
            _lowerCAmelCase : Union[str, Any] = kwargs.pop("""feature_extractor""" )
        _lowerCAmelCase : Any = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(a__ , a__ )
        _lowerCAmelCase : str = self.image_processor

    def __call__( self , a__ = None , a__ = None , a__ = True , a__ = False , a__ = False , a__ = None , a__ = 0 , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = False , a__ = False , a__ = False , a__ = False , a__ = True , a__ = None , **a__ , ):
        '''Tokenize text and/or preprocess images; merge both encodings when both are given.'''
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            _lowerCAmelCase : Union[str, Any] = self.tokenizer(
                text=a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , stride=a__ , pad_to_multiple_of=a__ , return_token_type_ids=a__ , return_attention_mask=a__ , return_overflowing_tokens=a__ , return_special_tokens_mask=a__ , return_offsets_mapping=a__ , return_length=a__ , verbose=a__ , return_tensors=a__ , **a__ , )
        if images is not None:
            _lowerCAmelCase : Optional[Any] = self.image_processor(
                a__ , return_image_mask=a__ , return_codebook_pixels=a__ , return_tensors=a__ , **a__ , )
        if text is not None and images is not None:
            encoding.update(a__ )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )

    def __A ( self , *a__ , **a__ ):
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*a__ , **a__ )

    def __A ( self , *a__ , **a__ ):
        # Delegate single-sequence decoding to the tokenizer.
        return self.tokenizer.decode(*a__ , **a__ )

    @property
    def __A ( self ):
        # Union of tokenizer and image-processor input names, order-preserving.
        _lowerCAmelCase : Union[str, Any] = self.tokenizer.model_input_names
        _lowerCAmelCase : int = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def __A ( self ):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , a__ , )
        return self.image_processor_class

    @property
    def __A ( self ):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , a__ , )
        return self.image_processor
213
"""simple docstring""" from timeit import timeit def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int: if number < 0: raise ValueError("""the value of input must not be negative""" ) _lowerCAmelCase : Dict = 0 while number: number &= number - 1 result += 1 return result def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int: if number < 0: raise ValueError("""the value of input must not be negative""" ) _lowerCAmelCase : Optional[Any] = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def SCREAMING_SNAKE_CASE ( ) -> None: def do_benchmark(_lowerCamelCase : int ) -> None: _lowerCAmelCase : Dict = """import __main__ as z""" print(f"Benchmark when {number = }:" ) print(f"{get_set_bits_count_using_modulo_operator(_lowerCamelCase ) = }" ) _lowerCAmelCase : List[Any] = timeit("""z.get_set_bits_count_using_modulo_operator(25)""" ,setup=_lowerCamelCase ) print(f"timeit() runs in {timing} seconds" ) print(f"{get_set_bits_count_using_brian_kernighans_algorithm(_lowerCamelCase ) = }" ) _lowerCAmelCase : str = timeit( """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""" ,setup=_lowerCamelCase ,) print(f"timeit() runs in {timing} seconds" ) for number in (25, 37, 58, 0): do_benchmark(_lowerCamelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
213
1
import os def __lowerCamelCase ( __lowerCAmelCase : str = "input.txt" ) -> int: with open(os.path.join(os.path.dirname(__lowerCAmelCase ) , __lowerCAmelCase ) ) as input_file: __UpperCamelCase : List[Any] = [ [int(__lowerCAmelCase ) for element in line.split(""",""" )] for line in input_file.readlines() ] __UpperCamelCase : List[str] = len(__lowerCAmelCase ) __UpperCamelCase : Dict = len(matrix[0] ) __UpperCamelCase : Union[str, Any] = [[-1 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )] for i in range(__lowerCAmelCase ): __UpperCamelCase : Union[str, Any] = matrix[i][0] for j in range(1 , __lowerCAmelCase ): for i in range(__lowerCAmelCase ): __UpperCamelCase : List[str] = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , __lowerCAmelCase ): __UpperCamelCase : str = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): __UpperCamelCase : Tuple = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F"""{solution() = }""")
515
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class _A ( unittest.TestCase ):
    """Fast tests for the Karras VE pipeline using a tiny dummy UNet.

    NOTE(review): name mangling renamed methods and locals; the references to
    ``self.dummy_uncond_unet``, ``model``, ``image`` etc. point at the mangled
    definitions/assignments beside them.
    """

    @property
    def a ( self : Tuple ):
        '''Small deterministic 32x32 UNet used as the pipeline backbone.'''
        torch.manual_seed(0 )
        __UpperCamelCase : int = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model

    def a ( self : Tuple ):
        '''dict and tuple outputs must agree and hit a known 3x3 corner slice.'''
        __UpperCamelCase : str = self.dummy_uncond_unet
        __UpperCamelCase : Optional[int] = KarrasVeScheduler()
        __UpperCamelCase : List[str] = KarrasVePipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        __UpperCamelCase : List[str] = torch.manual_seed(0 )
        __UpperCamelCase : Any = pipe(num_inference_steps=2 , generator=lowerCamelCase__ , output_type="""numpy""" ).images
        __UpperCamelCase : Tuple = torch.manual_seed(0 )
        __UpperCamelCase : int = pipe(num_inference_steps=2 , generator=lowerCamelCase__ , output_type="""numpy""" , return_dict=lowerCamelCase__ )[0]
        __UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __UpperCamelCase : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch
class _A ( unittest.TestCase ):
    """Slow integration test against the pretrained CelebA-HQ 256 checkpoint."""

    def a ( self : Optional[Any] ):
        '''20-step sample from the pretrained model must match a known slice.'''
        __UpperCamelCase : Union[str, Any] = """google/ncsnpp-celebahq-256"""
        __UpperCamelCase : Any = UNetaDModel.from_pretrained(lowerCamelCase__ )
        __UpperCamelCase : Optional[Any] = KarrasVeScheduler()
        __UpperCamelCase : Dict = KarrasVePipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        __UpperCamelCase : Tuple = torch.manual_seed(0 )
        __UpperCamelCase : Union[str, Any] = pipe(num_inference_steps=20 , generator=lowerCamelCase__ , output_type="""numpy""" ).images
        __UpperCamelCase : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        __UpperCamelCase : List[str] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
515
1
"""CTRL tokenizer (word-level BPE with ``@@`` continuation markers).

Bug fixed: name mangling had collapsed the four module constants and every
method onto shared identifiers — the class would have ended up with only the
last method bound, ``get_pairs`` referenced a parameter name that no longer
existed, and the base class / constant names the bodies reference were
undefined. The canonical names (which the method bodies still call, e.g.
``self.bpe``, ``get_pairs``, ``VOCAB_FILES_NAMES``) are restored.
"""
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
    'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'ctrl': 256,
}

# Prompt control codes understood by the CTRL model (name -> vocab id).
CONTROL_CODES = {
    'Pregnancy': 168629,
    'Christianity': 7675,
    'Explain': 106423,
    'Fitness': 63440,
    'Saving': 63163,
    'Ask': 27171,
    'Ass': 95985,
    'Joke': 163509,
    'Questions': 45622,
    'Thoughts': 49605,
    'Retail': 52342,
    'Feminism': 164338,
    'Writing': 11992,
    'Atheism': 192263,
    'Netflix': 48616,
    'Computing': 39639,
    'Opinion': 43213,
    'Alone': 44967,
    'Funny': 58917,
    'Gaming': 40358,
    'Human': 4088,
    'India': 1331,
    'Joker': 77138,
    'Diet': 36206,
    'Legal': 11859,
    'Norman': 4939,
    'Tip': 72689,
    'Weight': 52343,
    'Movies': 46273,
    'Running': 23425,
    'Science': 2090,
    'Horror': 37793,
    'Confession': 60572,
    'Finance': 12250,
    'Politics': 16360,
    'Scary': 191985,
    'Support': 12654,
    'Technologies': 32516,
    'Teenage': 66160,
    'Event': 32769,
    'Learned': 67460,
    'Notion': 182770,
    'Wikipedia': 37583,
    'Books': 6665,
    'Extract': 76050,
    'Confessions': 102701,
    'Conspiracy': 75932,
    'Links': 63674,
    'Narcissus': 150425,
    'Relationship': 54766,
    'Relationships': 134796,
    'Reviews': 41671,
    'News': 4256,
    'Translation': 26820,
    'multilingual': 128406,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs


class _UpperCamelCase ( PreTrainedTokenizer ):
    """CTRL tokenizer based on byte-pair encoding.

    Word-internal BPE pieces are suffixed with ``@@``; ``</w>`` marks word ends
    during merging. ``CONTROL_CODES`` lists the model's prompt control codes.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__( self, vocab_file, merges_file, unk_token="<unk>", **kwargs ):
        """Load the vocabulary (json) and merge ranks (text) from disk."""
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is a header, last line is empty.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size( self ):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab( self ):
        """Full vocabulary including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe( self, token ):
        """Apply the ranked BPE merges to one whitespace token; results are cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) remaining pair.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        # "@@ " marks continuation pieces; strip the trailing "</w>" marker.
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize( self, text ):
        """Split on whitespace, then BPE each token into '@@'-joined pieces."""
        split_tokens = []
        words = re.findall(R"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id( self, token ):
        """Token string -> id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token( self, index ):
        """Id -> token string, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string( self, tokens ):
        """Join pieces and remove the '@@ ' continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``.

        Returns the two file paths; logs an error and returns None when the
        target is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
25
from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class _UpperCamelCase ( lowerCamelCase__ ):
    """A base distribution shifted by ``loc`` and scaled by ``scale`` via AffineTransform.

    NOTE(review): name mangling collapsed parameter/attribute names; the
    references to ``scale``/``loc`` below belong to the original signature.
    """

    def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Distribution , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 ):
        # Defaults are the identity transform (scale 1, shift 0).
        _a = 1.0 if scale is None else scale
        _a = 0.0 if loc is None else loc
        super().__init__(SCREAMING_SNAKE_CASE_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE_ )] )

    @property
    def _UpperCAmelCase ( self : List[str] ):
        # E[aX + b] = a E[X] + b
        return self.base_dist.mean * self.scale + self.loc

    @property
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Var(aX + b) = a^2 Var(X)
        return self.base_dist.variance * self.scale**2

    @property
    def _UpperCAmelCase ( self : int ):
        # Standard deviation derived from the transformed variance.
        return self.variance.sqrt()


class _UpperCamelCase ( nn.Module ):
    """Projects features to one linear head per distribution argument, then applies the domain map."""

    def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE_ : int ):
        super().__init__(**SCREAMING_SNAKE_CASE_ )
        _a = args_dim
        # One Linear per distribution argument (df, loc, scale, ...).
        _a = nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for dim in args_dim.values()] )
        _a = domain_map

    def _UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : torch.Tensor ):
        _a = [proj(SCREAMING_SNAKE_CASE_ ) for proj in self.proj]
        return self.domain_map(*SCREAMING_SNAKE_CASE_ )


class _UpperCamelCase ( nn.Module ):
    """Wraps a plain callable as an nn.Module so domain maps compose with nn layers."""

    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ):
        super().__init__()
        _a = function

    def _UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , *SCREAMING_SNAKE_CASE_ : int ):
        return self.function(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )


class _UpperCamelCase :
    """Base class describing how network outputs parameterize a torch distribution."""

    # NOTE(review): mangling replaced the class-level declarations
    # (distribution class / in_features / args_dim annotations) with literal 42s.
    _A = 42
    _A = 42
    _A = 42

    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int = 1 ):
        _a = dim
        # Scale each per-argument width by the output dimension.
        _a = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
        # dim == 1: univariate; otherwise treat the last axis as independent events.
        if self.dim == 1:
            return self.distribution_class(*SCREAMING_SNAKE_CASE_ )
        else:
            return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE_ ) , 1 )

    def _UpperCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.Tensor] = None , ):
        # Optionally wrap the base distribution in an affine loc/scale transform.
        _a = self._base_distribution(SCREAMING_SNAKE_CASE_ )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(SCREAMING_SNAKE_CASE_ , loc=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , event_dim=self.event_dim )

    @property
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Event shape: scalar for dim 1, otherwise a vector of length dim.
        return () if self.dim == 1 else (self.dim,)

    @property
    def _UpperCAmelCase ( self : Any ):
        return len(self.event_shape )

    @property
    def _UpperCAmelCase ( self : Tuple ):
        # Lower bound of the distribution support.
        return 0.0

    def _UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : int ):
        # Build the projection head that maps features to distribution arguments.
        return ParameterProjection(
            in_features=SCREAMING_SNAKE_CASE_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )

    def _UpperCAmelCase ( self : Dict , *SCREAMING_SNAKE_CASE_ : torch.Tensor ):
        raise NotImplementedError()

    @staticmethod
    def _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ : torch.Tensor ):
        # Smooth positive mapping ("squareplus"): keeps outputs strictly > 0.
        return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE_ ) + 4.0 )) / 2.0


class _UpperCamelCase ( lowerCamelCase__ ):
    """Student-T output head: arguments df, loc, scale."""

    _A = {"df": 1, "loc": 1, "scale": 1}
    _A = StudentT

    @classmethod
    def _UpperCAmelCase ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.Tensor , SCREAMING_SNAKE_CASE_ : torch.Tensor , SCREAMING_SNAKE_CASE_ : torch.Tensor ):
        # Map raw outputs into valid ranges: scale > eps, df > 2.
        _a = cls.squareplus(SCREAMING_SNAKE_CASE_ ).clamp_min(torch.finfo(scale.dtype ).eps )
        _a = 2.0 + cls.squareplus(SCREAMING_SNAKE_CASE_ )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )


class _UpperCamelCase ( lowerCamelCase__ ):
    """Normal output head: arguments loc, scale."""

    _A = {"loc": 1, "scale": 1}
    _A = Normal

    @classmethod
    def _UpperCAmelCase ( cls : Optional[int] , SCREAMING_SNAKE_CASE_ : torch.Tensor , SCREAMING_SNAKE_CASE_ : torch.Tensor ):
        _a = cls.squareplus(SCREAMING_SNAKE_CASE_ ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )


class _UpperCamelCase ( lowerCamelCase__ ):
    """Negative-binomial output head: arguments total_count, logits."""

    _A = {"total_count": 1, "logits": 1}
    _A = NegativeBinomial

    @classmethod
    def _UpperCAmelCase ( cls : Optional[int] , SCREAMING_SNAKE_CASE_ : torch.Tensor , SCREAMING_SNAKE_CASE_ : torch.Tensor ):
        _a = cls.squareplus(SCREAMING_SNAKE_CASE_ )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def _UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
        # NegativeBinomial takes keyword args, so override the base builder.
        _a , _a = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ )
        else:
            return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ ) , 1 )

    def _UpperCAmelCase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.Tensor] = None ):
        _a , _a = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
562
0
'''simple docstring''' def __lowercase ( __SCREAMING_SNAKE_CASE = 100_0000 ) -> int: """simple docstring""" __a = limit + 1 __a = [0] * limit for first_term in range(1 , __SCREAMING_SNAKE_CASE ): for n in range(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __a = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a __a = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(f"""{solution() = }""")
701
'''simple docstring''' from __future__ import annotations import os from typing import Any import requests SCREAMING_SNAKE_CASE_ = 'https://api.github.com' # https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user SCREAMING_SNAKE_CASE_ = BASE_URL + '/user' # https://github.com/settings/tokens SCREAMING_SNAKE_CASE_ = os.environ.get('USER_TOKEN', '') def __lowercase ( __SCREAMING_SNAKE_CASE ) -> dict[Any, Any]: """simple docstring""" __a = { """Authorization""": F'''token {auth_token}''', """Accept""": """application/vnd.github.v3+json""", } return requests.get(__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE ).json() if __name__ == "__main__": # pragma: no cover if USER_TOKEN: for key, value in fetch_github_info(USER_TOKEN).items(): print(f"""{key}: {value}""") else: raise ValueError('\'USER_TOKEN\' field cannot be empty.')
201
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def A__ ( __lowerCamelCase ): """simple docstring""" # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def A__ ( __lowerCamelCase, __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _lowerCAmelCase = key.replace('heads.cmd.mim_head.cls.predictions', 'mmm_image_head' ) _lowerCAmelCase = key.replace('heads.cmd.mlm_head.cls.predictions', 'mmm_text_head' ) _lowerCAmelCase = key.replace('heads.cmd.itm_head.cls', 'itm_head' ) _lowerCAmelCase = key.replace('heads.cmd.itm_head.pooler', 'itm_head.pooler' ) _lowerCAmelCase = key.replace('heads.cmd.clip_head.logit_scale', 'flava.logit_scale' ) _lowerCAmelCase = key.replace('heads.fairseq_mlm.cls.predictions', 'mlm_head' ) _lowerCAmelCase = key.replace('heads.imagenet.mim_head.cls.predictions', 'mim_head' ) _lowerCAmelCase = key.replace('mm_text_projection', 'flava.text_to_mm_projection' ) _lowerCAmelCase = key.replace('mm_image_projection', 'flava.image_to_mm_projection' ) _lowerCAmelCase = key.replace('image_encoder.module', 'flava.image_model' ) _lowerCAmelCase = key.replace('text_encoder.module', 'flava.text_model' ) _lowerCAmelCase = key.replace('mm_encoder.module.encoder.cls_token', 'flava.multimodal_model.cls_token' ) _lowerCAmelCase = key.replace('mm_encoder.module', 'flava.multimodal_model' ) _lowerCAmelCase = key.replace('text_projection', 'flava.text_projection' ) _lowerCAmelCase = key.replace('image_projection', 'flava.image_projection' ) _lowerCAmelCase = value.float() for key, value in codebook_state_dict.items(): _lowerCAmelCase = value return upgrade 
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Load an original FLAVA checkpoint, remap it, sanity-check it, and save it in HF format.

    Args:
        checkpoint_path: local path or URL of the original FLAVA checkpoint.
        codebook_path: path to the DALL-E codebook checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional path to an HF ``config.json``; defaults to ``FlavaConfig()``.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    # Convert the codebook in memory only; it is merged into the main state dict below.
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    # Parameter-sum checksum: the converted model must match the original
    # (model + codebook) up to float32 rounding.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
589
0
import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available


if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    """Tests for ``RagTokenizer`` (a DPR question-encoder tokenizer paired with a BART generator tokenizer)."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok: a tiny WordPiece vocab written in the BERT `vocab.txt` format.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok: a tiny BPE vocab + merges written in the RoBERTa/GPT-2 format.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        # Round-trip: save config + tokenizer, reload, and check the fast
        # tokenizers are reconstructed with identical vocabularies.
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
716
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """Read a ``pyspark.sql.DataFrame`` into a :class:`Dataset` via the ``Spark`` builder.

    Args:
        df: the Spark DataFrame to read from.
        split / features / cache_dir / keep_in_memory / streaming: forwarded to the base reader.
        working_dir: scratch directory handed to the ``Spark`` builder.
        load_from_cache_file: when ``False`` the cache is ignored and the data re-prepared.
        file_format: on-disk format used when materializing (``"arrow"`` by default).
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Return the dataset: streaming if requested, otherwise prepared and cached on disk."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a re-download/re-prepare only when the cache is explicitly disabled.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
33
0
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArguments:
    """Configuration for training a CodeParrot model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1_024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating a trained model on the validation set."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running the HumanEval benchmark on generated code."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Random seed used for evaluation."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """Configuration for filtering and deduplicating the raw dataset."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1_000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """Configuration for training a new BPE tokenizer."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200_000, metadata={"help": "Number of examples to train tokenizer on."}
    )
    vocab_size: Optional[int] = field(
        default=32_768, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for pre-tokenizing the dataset."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class InitializationArguments:
    """Configuration for initializing a new model from a base config."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
12
# Memoization cache keyed on (days_remaining, absences_used, consecutive_lates).
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count the prize strings of length ``days`` reachable from the given state.

    A prize string (Project Euler problem 191) never contains three consecutive
    late days and never more than one absence in total.

    Args:
        days: number of days still to assign.
        absent: absences used so far (a second absence disqualifies the string).
        late: length of the current run of consecutive late days.
    """
    # Rule violations terminate this branch: a 3rd consecutive late or a 2nd absence.
    if late == 3 or absent == 2:
        return 0

    # If we have no days left, and have not failed any other rules,
    # we have a prize string.
    if days == 0:
        return 1

    # Check whether this state was already solved before recursing.
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # 1) late today (but not absent): the "absent" counter stays as it is,
    #    while the "late" run grows by one.
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) absent today: the "absent" counter increases by 1 and the "late"
    #    run resets to 0.
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) on time today: the "late" run resets and the absent counter is kept.
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of prize strings of the given length (default 30 days)."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
12
1
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are
        removed from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric(\"exact_match\")
    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results[\"exact_match\"], 1))
    25.0

    >>> exact_match = datasets.load_metric(\"exact_match\")
    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results[\"exact_match\"], 1))
    50.0

    >>> exact_match = datasets.load_metric(\"exact_match\")
    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results[\"exact_match\"], 1))
    75.0

    >>> exact_match = datasets.load_metric(\"exact_match\")
    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results[\"exact_match\"], 1))
    100.0

    >>> exact_match = datasets.load_metric(\"exact_match\")
    >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
    >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results[\"exact_match\"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    """Exact-match metric: percentage of predictions that equal their reference string."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both sides before comparing.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
246
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class a__ : A__ : List[Any] = MBartConfig A__ : Any = {} A__ : List[str] = 'gelu' def __init__( self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=9_9 , UpperCAmelCase=3_2 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=2_0 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=0 , ) -> List[str]: __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = eos_token_id __a = pad_token_id __a = bos_token_id def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: __a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __a = tf.concat([input_ids, eos_tensor] , axis=1 ) __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , 
encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __a = prepare_mbart_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return config, inputs_dict def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase ) -> int: __a = TFMBartModel(config=UpperCAmelCase ).get_decoder() __a = inputs_dict['input_ids'] __a = input_ids[:1, :] __a = inputs_dict['attention_mask'][:1, :] __a = inputs_dict['head_mask'] __a = 1 # first forward pass __a = model(UpperCAmelCase , attention_mask=UpperCAmelCase , head_mask=UpperCAmelCase , use_cache=UpperCAmelCase ) __a , __a = outputs.to_tuple() __a = past_key_values[1] def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ): if attention_mask is None: __a = tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __a = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __a = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": 
decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a__ ( __snake_case , __snake_case , unittest.TestCase ): A__ : List[Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () A__ : Any = (TFMBartForConditionalGeneration,) if is_tf_available() else () A__ : List[str] = ( { 'conversational': TFMBartForConditionalGeneration, 'feature-extraction': TFMBartModel, 'summarization': TFMBartForConditionalGeneration, 'text2text-generation': TFMBartForConditionalGeneration, 'translation': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) A__ : int = True A__ : List[str] = False A__ : Union[str, Any] = False def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: __a = TFMBartModelTester(self ) __a = ConfigTester(self , config_class=UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( self ) -> int: self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: __a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class a__ ( unittest.TestCase ): A__ : Optional[Any] = [ ' UN Chief Says There Is No Military Solution in Syria', ] A__ : List[Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', ] A__ : List[Any] = 'facebook/mbart-large-en-ro' @cached_property def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: __a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase ) -> Dict: __a = 
self.translate_src_text(**UpperCAmelCase ) self.assertListEqual(self.expected_text , UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase ) -> int: __a = self.tokenizer(self.src_text , **UpperCAmelCase , return_tensors='tf' ) __a = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __a = self.tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) return generated_words @slow def __SCREAMING_SNAKE_CASE ( self ) -> Any: self._assert_generated_batch_equal_expected()
246
1
"""Accelerate example: track peak GPU memory usage while fine-tuning BERT on MRPC.

Restored from a mangled state in which every function shared one name
(`lowerCAmelCase__`), the tracemalloc class was renamed away from the
`TorchTracemalloc` its call site uses, several defs had duplicate parameter
names (a SyntaxError), and `main` was undefined at the entry-point guard.
Names below match the in-file call sites (`bamb`, `get_dataloaders`,
`training_function`, `TorchTracemalloc`, `main`).
"""
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
# Back-compat alias: the mangled file bound both constants to `lowercase`,
# so only the last assignment (32) was observable.
lowercase = EVAL_BATCH_SIZE


def bamb(x):
    """Convert a byte count to whole megabytes (floored)."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager that reports CUDA memory allocated within its scope.

    After exit: ``used`` is MB allocated net of entry, ``peaked`` is the peak
    MB above the entry baseline, ``begin``/``end``/``peak`` are raw byte counts.
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    """Build train/eval dataloaders over a small slice of GLUE MRPC.

    Args:
        accelerator: the ``Accelerator``; used only to pick TPU-safe padding.
        batch_size: per-device batch size for both loaders.
        model_name: tokenizer checkpoint to load.
        n_train / n_val: number of examples to take from each split.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
        load_from_cache_file=False,
    )

    # Rename 'label' to 'labels', the name expected by transformers models.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train for ``config['num_epochs']`` epochs, recording per-epoch peak GPU memory.

    Writes ``peak_memory_utilization.json`` into ``args.output_dir`` on the main
    process, and asserts against ``args.peak_memory_upper_bound`` when set.
    """
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(
        accelerator, batch_size, model_name, args.n_train, args.n_val
    )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Use a dummy optimizer when DeepSpeed supplies its own via config.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (dummy when DeepSpeed supplies one).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything. There is no specific order to remember, we just need to
    # unpack the objects in the same order we gave them to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    """Parse CLI arguments and launch the training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
568
"""simple docstring""" _snake_case = 6_5521 def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : List[str] = 1 _a : Optional[int] = 0 for plain_chr in plain_text: _a : Dict = (a + ord(UpperCamelCase__ )) % MOD_ADLER _a : List[Any] = (b + a) % MOD_ADLER return (b << 1_6) | a
389
0
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): lowercase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right lowercase : Optional[int] = 12_80_22 lowercase : Union[str, Any] = 12_80_28 @require_sentencepiece class UpperCAmelCase_ ( __a , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = MaMaaaTokenizer A : Optional[Any] = False A : int = False A : List[Any] = True def _lowerCAmelCase ( self ) -> Any: super().setUp() snake_case_ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] snake_case_ : Dict = dict(zip(a_ , range(len(a_ ) ) ) ) snake_case_ : Tuple = Path(self.tmpdirname ) save_json(a_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(a_ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) snake_case_ : Optional[Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self , **_SCREAMING_SNAKE_CASE ) -> List[str]: return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **a_ ) def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any: return ( "This is a test", "This is a test", ) def _lowerCAmelCase ( self ) -> Optional[int]: snake_case_ : Optional[int] = """</s>""" snake_case_ : str = 0 
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def _lowerCAmelCase ( self ) -> int: snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : Dict = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(a_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." ) def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> int: snake_case_ : Tuple = self.get_tokenizer() snake_case_ : str = tokenizer.tokenize("This is a test" ) self.assertListEqual(a_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a_ ) , [2, 3, 4, 5, 6] , ) snake_case_ : List[str] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(a_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) snake_case_ : Optional[int] = tokenizer.convert_tokens_to_string(a_ ) self.assertEqual(a_ , "This is a test" ) @slow def _lowerCAmelCase ( self ) -> int: snake_case_ : Tuple = {"""input_ids""": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 
1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch @require_sentencepiece @require_tokenizers class 
UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' A : List[Any] = 'facebook/m2m100_418M' A : Optional[int] = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] A : Any = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off A : Any = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2] @classmethod def _lowerCAmelCase ( cls ) -> Union[str, Any]: snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) snake_case_ : Dict = 1 return cls def _lowerCAmelCase ( self ) -> Optional[int]: self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 12_8006 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 12_8022 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 12_8076 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 12_8063 ) def _lowerCAmelCase ( self ) -> Tuple: snake_case_ : Optional[int] = self.tokenizer.get_vocab() self.assertEqual(len(a_ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , a_ ) def _lowerCAmelCase ( self ) -> List[Any]: snake_case_ : List[Any] = """en""" snake_case_ : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , a_ ) def _lowerCAmelCase ( self ) -> List[Any]: self.assertIn(a_ , self.tokenizer.all_special_ids ) # fmt: off snake_case_ : Union[str, Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2] # fmt: on snake_case_ : Any = self.tokenizer.decode(a_ , skip_special_tokens=a_ ) snake_case_ : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a_ ) 
self.assertEqual(a_ , a_ ) self.assertNotIn(self.tokenizer.eos_token , a_ ) def _lowerCAmelCase ( self ) -> Optional[Any]: snake_case_ : int = tempfile.mkdtemp() snake_case_ : int = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(a_ ) snake_case_ : Optional[Any] = MaMaaaTokenizer.from_pretrained(a_ ) self.assertDictEqual(new_tok.lang_token_to_id , a_ ) @require_torch def _lowerCAmelCase ( self ) -> List[Any]: snake_case_ : Dict = """en""" snake_case_ : Union[str, Any] = """fr""" snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a_ , return_tensors="pt" ) snake_case_ : List[str] = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: snake_case_ : Optional[int] = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _lowerCAmelCase ( self ) -> Optional[Any]: snake_case_ : Optional[Any] = """mr""" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) snake_case_ : str = """zh""" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def _lowerCAmelCase ( self ) -> Union[str, Any]: snake_case_ : int = """mr""" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) 
self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) snake_case_ : Dict = """zh""" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def _lowerCAmelCase ( self ) -> Dict: snake_case_ : List[str] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(a_ ) , { # en_XX, A, test, EOS "input_ids": [[12_8022, 58, 4183, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 12_8006, } , )
706
# NOTE(review): this TFRegNet test file has been through an identifier-mangling
# pass — locals collapsed to `snake_case_`, parameters to `_SCREAMING_SNAKE_CASE`
# (several defs therefore have duplicate parameter names, a SyntaxError), and
# tuple-unpacking targets carry annotations (`a, b: T = ...`, also a
# SyntaxError). Comments below document the intended behavior; the code is kept
# byte-identical — TODO restore original identifiers before running.
from __future__ import annotations

import inspect
import unittest
from typing import List, Tuple

from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# Model tester: builds tiny RegNet configs and random inputs for the suite.
class UpperCAmelCase_ :
    '''simple docstring'''

    # NOTE(review): all parameters are named `_SCREAMING_SNAKE_CASE` and the
    # body reads `parent`, `batch_size`, ... which are never bound — mangled.
    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , ) -> List[str]:
        snake_case_ : str = parent
        snake_case_ : Optional[int] = batch_size
        snake_case_ : Dict = image_size
        snake_case_ : Tuple = num_channels
        snake_case_ : Union[str, Any] = embeddings_size
        snake_case_ : int = hidden_sizes
        snake_case_ : Optional[int] = depths
        snake_case_ : Dict = is_training
        snake_case_ : Tuple = use_labels
        snake_case_ : int = hidden_act
        snake_case_ : List[str] = num_labels
        snake_case_ : List[Any] = scope
        snake_case_ : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )

    # Builds (config, pixel_values, labels) with random tensors.
    def _lowerCAmelCase ( self ) -> Dict:
        snake_case_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case_ : Optional[Any] = None
        if self.use_labels:
            snake_case_ : Any = ids_tensor([self.batch_size] , self.num_labels )
        snake_case_ : Optional[int] = self.get_config()
        return config, pixel_values, labels

    # Returns a RegNetConfig mirroring the tester's hyper-parameters.
    def _lowerCAmelCase ( self ) -> Optional[int]:
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    # Checks the base model's last hidden state shape (B, C, H//32, W//32).
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        snake_case_ : Union[str, Any] = TFRegNetModel(config=_SCREAMING_SNAKE_CASE )
        snake_case_ : List[str] = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    # Checks the classification head's logits shape (B, num_labels).
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
        snake_case_ : Optional[int] = self.num_labels
        snake_case_ : Tuple = TFRegNetForImageClassification(_SCREAMING_SNAKE_CASE )
        snake_case_ : List[str] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Splits prepared inputs into (config, inputs_dict) for common tests.
    # NOTE(review): annotated tuple-unpacking target below is a SyntaxError.
    def _lowerCAmelCase ( self ) -> Union[str, Any]:
        snake_case_ : Optional[Any] = self.prepare_config_and_inputs()
        snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = config_and_inputs
        snake_case_ : Any = {"pixel_values": pixel_values}
        return config, inputs_dict


# Common TF model-test harness for RegNet; base classes were mangled to
# `SCREAMING_SNAKE_CASE__` (presumably TFModelTesterMixin / PipelineTesterMixin).
@require_tf
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''simple docstring'''

    A : Optional[int] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    A : Dict = (
        {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    A : List[Any] = False
    A : List[str] = False
    A : Optional[Any] = False
    A : List[Any] = False
    A : List[Any] = False

    # setUp: builds the model tester and config tester.
    def _lowerCAmelCase ( self ) -> Any:
        snake_case_ : List[Any] = TFRegNetModelTester(self )
        snake_case_ : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )

    def _lowerCAmelCase ( self ) -> Dict:
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds" )
    def _lowerCAmelCase ( self ) -> Dict:
        pass

    # Keras fit test; only meaningful on GPU for grouped convolutions.
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
    @slow
    def _lowerCAmelCase ( self ) -> Any:
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings" )
    def _lowerCAmelCase ( self ) -> str:
        pass

    # Verifies the forward signature starts with `pixel_values`.
    def _lowerCAmelCase ( self ) -> int:
        snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : int = model_class(_SCREAMING_SNAKE_CASE )
            snake_case_ : str = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : List[str] = [*signature.parameters.keys()]
            snake_case_ : Dict = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )

    def _lowerCAmelCase ( self ) -> Union[str, Any]:
        snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )

    # Checks hidden-state outputs for both layer types and both the kwarg and
    # config routes for output_hidden_states.
    def _lowerCAmelCase ( self ) -> Tuple:
        def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            snake_case_ : List[str] = model_class(_SCREAMING_SNAKE_CASE )
            snake_case_ : Tuple = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , training=_SCREAMING_SNAKE_CASE )
            snake_case_ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            snake_case_ : Any = self.model_tester.num_stages
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )

        snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Optional[int] = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                snake_case_ : Any = layer_type
                snake_case_ : int = True
                check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                snake_case_ : Any = True
                check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    # Verifies tuple vs. dict outputs are element-wise equal in all modes.
    def _lowerCAmelCase ( self ) -> Optional[Any]:
        snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        # NOTE(review): mutable default `{}` below — shared across calls; kept as-is.
        def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
            snake_case_ : List[Any] = model(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
            snake_case_ : List[Any] = model(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()

            def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                        recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) , msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
                        ) , )

            recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

        for model_class in self.all_model_classes:
            snake_case_ : str = model_class(_SCREAMING_SNAKE_CASE )
            snake_case_ : List[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            snake_case_ : Optional[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            snake_case_ : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
            snake_case_ : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
            check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            snake_case_ : Optional[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            snake_case_ : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"output_hidden_states": True} )
            snake_case_ : List[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
            snake_case_ : Dict = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
            check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {"output_hidden_states": True} )

    def _lowerCAmelCase ( self ) -> List[str]:
        snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )

    # Smoke-loads the first published checkpoint.
    @slow
    def _lowerCAmelCase ( self ) -> Any:
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ : Dict = TFRegNetModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )


# Loads the standard COCO cats fixture image for the integration test.
# NOTE(review): returns `image` but assigns to `snake_case_` — mangled.
def lowerCAmelCase__ ( ):
    snake_case_ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


# Slow integration test: checks logits of a pretrained checkpoint on one image.
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def _lowerCAmelCase ( self ) -> List[Any]:
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def _lowerCAmelCase ( self ) -> Dict:
        snake_case_ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        snake_case_ : int = self.default_image_processor
        snake_case_ : List[Any] = prepare_img()
        snake_case_ : str = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="tf" )
        # forward pass
        snake_case_ : Any = model(**_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
        # verify the logits
        snake_case_ : List[str] = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
        snake_case_ : Optional[int] = tf.constant([-0.4180, -1.5051, -3.4836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
114
0
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


# Bibtex entries for the TER metric paper (Snover et al., 2006) and for sacrebleu (Post, 2018).
# NOTE: the original file assigned all three module constants below to the same placeholder
# name `__lowerCamelCase`, so the decorator and `_info` raised NameError; the real names are
# restored here.
_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                    "what about this sentence?",
        ...                    "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                    "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                    "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                    "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                    "what about this sentence?",
        ...                    "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a__(datasets.Metric):  # class name kept as-is so external references keep working
    """TER metric: wraps sacrebleu's TER implementation for the `datasets` Metric API."""

    def _info(self):
        """Return the metric metadata; fail early if the installed sacrebleu is too old.

        NOTE: `datasets.Metric` dispatches to `_info`/`_compute`; the original file named
        both hooks `__UpperCamelCase`, which broke the Metric contract — restored here.
        """
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """Compute TER for `predictions` against one or more `references` per prediction.

        Raises:
            ValueError: if the predictions do not all have the same number of references.
        """
        # Every prediction must come with the same number of references.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        # Transpose [pred][ref] -> [ref][pred], the layout sacrebleu's corpus_score expects.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
216
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    """Return a histogram of totals for `dice_number` fair dice with `sides_number` faces.

    Index `t` of the returned list holds the number of outcome tuples whose faces sum
    to `t`; indices 0..dice_number-1 are unreachable and stay 0.

    NOTE: the original obfuscated version declared both parameters with the same
    placeholder name (a SyntaxError) and referenced undefined locals; this restores
    the intended implementation.
    """
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    # Enumerate every ordered roll of `dice_number` dice and tally its total.
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Project Euler 205: probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice).

    Returns the probability rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9          # nine dice, minimum face 1
    max_peter_total = 4 * 9      # nine dice, maximum face 4
    min_colin_total = 6          # six dice, minimum face 1
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins a game with this total against every Colin total strictly below it.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    return round(peter_win_probability, ndigits=7)


if __name__ == "__main__":
    print(f"{solution() = }")
216
1
"""Integration tests for the Versatile Diffusion image-variation pipeline."""
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


# Module-level flag; unused in the visible code — purpose unclear from this file. TODO confirm.
__lowercase : Union[str, Any] = False


class _A ( unittest.TestCase ):
    """Placeholder for fast (CPU/dummy) pipeline tests; intentionally empty."""

    pass


@slow
@require_torch_gpu
# NOTE(review): this second class reuses the obfuscated name `_A`, shadowing the empty
# class above — only this definition survives at import time.
class _A ( unittest.TestCase ):
    """Slow GPU integration test for VersatileDiffusionImageVariationPipeline."""

    def lowercase ( self : Optional[int] ) -> Dict:
        # NOTE(review): the assignment targets below were rewritten to `__snake_case` by an
        # obfuscation pass, while later statements still read the intended names
        # (`pipe`, `image`, `generator`, `image_slice`, `expected_slice`) — as written this
        # method raises NameError. Comments describe the recoverable intent.

        # Load the released checkpoint and move it to the test device.
        __snake_case = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )

        # Conditioning image for the image-variation task.
        __snake_case = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        # Fixed seed so the generated image (and the checked slice) is deterministic.
        __snake_case = torch.manual_seed(0 )
        __snake_case = pipe(
            image=A_ , generator=A_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images

        # Compare a 3x3 corner patch of the last channel against reference values.
        __snake_case = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        __snake_case = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
        # 1e-2 absolute tolerance absorbs cross-GPU nondeterminism.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
93
"""Unit tests for CLIPSegProcessor: tokenizer + image-processor wiring, save/load, decoding."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class _A ( unittest.TestCase ):
    """Tests saving/loading and input handling of CLIPSegProcessor.

    NOTE(review): every method below is named ``lowercase`` (each definition shadows the
    previous one) and every assignment target was rewritten to ``__snake_case`` by an
    obfuscation pass, while later statements still read the intended names
    (``self.tmpdirname``, ``processor_slow``, ``image_inputs`` …) — as written the methods
    raise NameError. The comments describe the recoverable intent.
    """

    def lowercase ( self : Optional[int] ) -> List[str]:
        # setUp: write a tiny BPE tokenizer (vocab + merges) and a ViT image-processor
        # JSON config into a fresh temp dir so `from_pretrained` can load from disk.
        __snake_case = tempfile.mkdtemp()

        # fmt: off
        __snake_case = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        __snake_case = dict(zip(A_ , range(len(A_ ) ) ) )
        __snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        __snake_case = {'''unk_token''': '''<unk>'''}

        __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(A_ ) )

        __snake_case = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        __snake_case = os.path.join(self.tmpdirname , A_ )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(A_ , A_ )

    def lowercase ( self : Optional[Any] , **A_ : Dict ) -> Any:
        # Slow tokenizer loaded from the temp dir written in setUp.
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **A_ )

    def lowercase ( self : Optional[int] , **A_ : str ) -> str:
        # Fast (Rust) tokenizer built from the same files.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A_ )

    def lowercase ( self : Any , **A_ : Tuple ) -> Tuple:
        # Image processor loaded from the JSON config written in setUp.
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )

    def lowercase ( self : Optional[int] ) -> Optional[Any]:
        # tearDown: remove the temp dir and everything written into it.
        shutil.rmtree(self.tmpdirname )

    def lowercase ( self : int ) -> Optional[Any]:
        # Build one random 30x400 RGB PIL image to feed through the processor.
        __snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]

        __snake_case = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]

        return image_inputs

    def lowercase ( self : Optional[Any] ) -> Optional[Any]:
        # save_pretrained + from_pretrained round-trip for both slow and fast tokenizers:
        # vocab and image-processor config must survive unchanged.
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_rust_tokenizer()
        __snake_case = self.get_image_processor()

        __snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
        processor_slow.save_pretrained(self.tmpdirname )
        __snake_case = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
        __snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
        processor_fast.save_pretrained(self.tmpdirname )
        __snake_case = CLIPSegProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , A_ )
        self.assertIsInstance(processor_fast.tokenizer , A_ )

        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , A_ )
        self.assertIsInstance(processor_fast.image_processor , A_ )

    def lowercase ( self : Union[str, Any] ) -> Any:
        # Loading with overridden special tokens / image-processor kwargs must pick up
        # the overrides instead of the values saved on disk.
        __snake_case = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        __snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __snake_case = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )

        __snake_case = CLIPSegProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , A_ )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )

    def lowercase ( self : Any ) -> str:
        # Processor(images=...) output must numerically match the bare image processor.
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()

        __snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )

        __snake_case = self.prepare_image_inputs()

        __snake_case = image_processor(A_ , return_tensors='''np''' )
        __snake_case = processor(images=A_ , return_tensors='''np''' )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def lowercase ( self : List[str] ) -> List[Any]:
        # Processor(text=...) output must match the bare tokenizer, key by key.
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()

        __snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )

        __snake_case = '''lower newer'''

        __snake_case = processor(text=A_ )

        __snake_case = tokenizer(A_ )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def lowercase ( self : List[Any] ) -> str:
        # text + images yields tokenizer keys plus pixel_values; calling with no input raises.
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()

        __snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )

        __snake_case = '''lower newer'''
        __snake_case = self.prepare_image_inputs()

        __snake_case = processor(text=A_ , images=A_ )

        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )

        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()

    def lowercase ( self : Union[str, Any] ) -> Any:
        # Visual-prompt path: images + prompt images yield pixel_values and
        # conditional_pixel_values; calling with no input raises.
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()

        __snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )

        __snake_case = self.prepare_image_inputs()
        __snake_case = self.prepare_image_inputs()

        __snake_case = processor(images=A_ , visual_prompt=A_ )

        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )

        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()

    def lowercase ( self : Optional[int] ) -> Dict:
        # batch_decode must delegate to the tokenizer's batch_decode unchanged.
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()

        __snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )

        __snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        __snake_case = processor.batch_decode(A_ )
        __snake_case = tokenizer.batch_decode(A_ )

        self.assertListEqual(A_ , A_ )
93
1
import math


def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal representation, e.g. 65 -> '0o101'.

    Builds the octal digits as a decimal-positioned integer (units digit first), then
    prefixes '0o'. Uses exact integer arithmetic (10**counter, //=) instead of the
    original float-based math.pow/math.floor, which loses precision for large inputs.

    NOTE: the original obfuscated file named both this function and main()
    `_SCREAMING_SNAKE_CASE` while calling `decimal_to_octal`/`main` (NameError);
    the intended names are restored here.
    """
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # Place this octal digit at the current decimal position (units, tens, ...).
        octal += remainder * 10**counter
        counter += 1
        num //= 8  # drop the digit just consumed
    return f"0o{octal}"


def main() -> None:
    """Print example conversions."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
557
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Import structure consumed by `_LazyModule`: maps each submodule to the public names it
# provides, so heavy modules are only imported on first attribute access.
# NOTE: the original obfuscated file assigned this dict (and the modeling-name list, and
# the _LazyModule instance) to dead placeholder locals, so `_import_structure` was
# undefined, the torch-gated names were never registered, and the lazy module was never
# installed — restored here to the standard transformers __init__ pattern.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: only the configuration objects are importable.
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers get the real imports.
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
557
1
def solution(n: int = 100) -> int:
    """Project Euler 29: count distinct values of a**b for 2 <= a <= n and 2 <= b <= n.

    Args:
        n: inclusive upper bound for both the base and the exponent (default 100).

    Returns:
        The number of distinct power terms.

    NOTE: the original obfuscated version collapsed every local to `lowercase`
    (so `collect_powers.add` raised NameError) and the bottom-of-file call
    referenced an undefined `solution`; the intended implementation — with the
    `n + 1` "maximum limit" bound the original computed but never used — is
    restored here.
    """
    collect_powers = set()  # set deduplicates equal powers such as 2**4 == 4**2
    max_pow = n + 1  # exclusive upper bound for both loops ("maximum limit")
    for a in range(2, max_pow):
        for b in range(2, max_pow):
            collect_powers.add(a**b)
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
565
"""Datasets CLI `convert` command: port a TensorFlow Datasets script to a 🤗 Datasets script."""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


# NOTE(review): the four module constants below were all renamed `UpperCAmelCase` by an
# obfuscation pass, so each assignment shadows the previous one; later code reads the
# intended names (HIGHLIGHT_MESSAGE_PRE, HIGHLIGHT_MESSAGE_POST, TO_HIGHLIGHT, TO_CONVERT).

# Markers wrapped around converted lines that still need manual attention.
UpperCAmelCase = '''<<<<<<< This should probably be modified because it mentions: '''

UpperCAmelCase = '''=======
>>>>>>>
'''

# TFDS API names that cannot be converted automatically and should be flagged for review.
UpperCAmelCase = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]

UpperCAmelCase = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]


def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    # argparse factory: build the command from parsed args.
    # NOTE(review): the parameter was renamed but the body reads `args`, and `ConvertCommand`
    # is presumably the class obfuscated to `A_` below — broken names, intent recoverable.
    return ConvertCommand(args.tfds_path , args.datasets_directory )


class A_ ( __lowerCamelCase ):
    """CLI command that rewrites TFDS dataset scripts into 🤗 Datasets scripts.

    NOTE(review): assignment targets throughout this class were rewritten to `lowercase`
    while later statements read the intended names (`train_parser`, `self._tfds_path`,
    `out_line`, `out_lines`, …) — as written the methods raise NameError. Comments
    describe the recoverable intent.
    """

    @staticmethod
    def SCREAMING_SNAKE_CASE__ ( snake_case ):
        # register_subcommand: add the `convert` subparser with its two path arguments.
        lowercase = parser.add_parser(
            'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
        train_parser.add_argument(
            '--tfds_path' , type=snake_case , required=snake_case , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
        train_parser.add_argument(
            '--datasets_directory' , type=snake_case , required=snake_case , help='Path to the HuggingFace Datasets folder.' )
        train_parser.set_defaults(func=snake_case )

    def __init__( self , snake_case , snake_case , *snake_case ):
        # Store source (tfds) and destination (datasets) paths plus a namespaced logger.
        lowercase = get_logger('datasets-cli/converting' )

        lowercase = tfds_path
        lowercase = datasets_directory

    def SCREAMING_SNAKE_CASE__ ( self ):
        # run(): walk the tfds scripts, rewrite TFDS API usage to datasets equivalents
        # line by line, then write converted builders (and shared utils) to the target dir.
        if os.path.isdir(self._tfds_path ):
            lowercase = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            lowercase = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )

        lowercase = os.path.abspath(self._datasets_directory )

        self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        lowercase = []
        lowercase = []
        lowercase = {}
        if os.path.isdir(self._tfds_path ):
            lowercase = os.listdir(snake_case )
        else:
            lowercase = [os.path.basename(self._tfds_path )]

        for f_name in file_names:
            self._logger.info(F'''Looking at file {f_name}''' )
            lowercase = os.path.join(snake_case , snake_case )
            lowercase = os.path.join(snake_case , snake_case )

            # Only convert real dataset scripts: skip __init__, tests, and non-Python files.
            if not os.path.isfile(snake_case ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('Skipping file' )
                continue

            with open(snake_case , encoding='utf-8' ) as f:
                lowercase = f.readlines()

            lowercase = []
            lowercase = False
            lowercase = False
            lowercase = []
            for line in lines:
                lowercase = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    lowercase = 'import datasets\n'
                elif "import tensorflow" in out_line:
                    # order is important here
                    lowercase = ''
                    continue
                elif "from absl import logging" in out_line:
                    lowercase = 'from datasets import logging\n'
                elif "getLogger" in out_line:
                    lowercase = out_line.replace('getLogger' , 'get_logger' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Line uses a TFDS feature with no automatic conversion: keep it but
                    # wrap it in conflict markers so a human reviews it.
                    lowercase = True
                    lowercase = list(filter(lambda snake_case : e in out_line , snake_case ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(snake_case ) + '\n' )
                    out_lines.append(snake_case )
                    out_lines.append(snake_case )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        lowercase = re.sub(snake_case , snake_case , snake_case )

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    lowercase = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , snake_case )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
                    lowercase = 'from . import ' + match.group(1 )

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F'''Error converting {out_line.strip()}''' )

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    lowercase = True
                out_lines.append(snake_case )

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                lowercase = f_name.replace('.py' , '' )
                lowercase = os.path.join(snake_case , snake_case )
                lowercase = os.path.join(snake_case , snake_case )
                os.makedirs(snake_case , exist_ok=snake_case )
                self._logger.info(F'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(snake_case )

            if needs_manual_update:
                with_manual_update.append(snake_case )

            with open(snake_case , 'w' , encoding='utf-8' ) as f:
                f.writelines(snake_case )
            self._logger.info(F'''Converted in {output_file}''' )

        for utils_file in utils_files:
            try:
                # Copy each shared utility next to the builder that imports it.
                lowercase = os.path.basename(snake_case )
                lowercase = imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(snake_case , snake_case )
            except KeyError:
                self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
565
1
'''Tests for the XLMProphetNet SentencePiece tokenizer.'''
import unittest

from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# Tiny SentencePiece model fixture shared by the fast tests below.
_lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""")


@require_sentencepiece
# NOTE(review): the base `_SCREAMING_SNAKE_CASE` is undefined here — presumably the
# imported TokenizerTesterMixin. Likewise, every method below is named `_lowercase`
# (each definition shadows the previous one) and assignment targets were rewritten to
# `_lowerCAmelCase` while later statements read the intended names — as written the
# methods raise NameError. Comments describe the recoverable intent.
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Tokenizer test-suite binding for XLMProphetNetTokenizer.'''

    # Mixin configuration: tokenizer class under test, no fast variant, test sentencepiece.
    _lowercase : List[str] = XLMProphetNetTokenizer
    _lowercase : str = False
    _lowercase : List[Any] = True

    def _lowercase ( self ):
        """setUp: build a tokenizer from the fixture and save it for later reloads."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        _lowerCAmelCase = XLMProphetNetTokenizer(_lowercase , keep_accents=_lowercase )
        tokenizer.save_pretrained(self.tmpdirname )

    def _lowercase ( self ):
        """Token <-> id conversion round-trips for the [PAD] token (id 0)."""
        _lowerCAmelCase = """[PAD]"""
        _lowerCAmelCase = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )

    def _lowercase ( self ):
        """Vocab ordering: [PAD] first, [CLS] second, 'j' last; 1012 entries total."""
        _lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , """[PAD]""" )
        self.assertEqual(vocab_keys[1] , """[CLS]""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(_lowercase ) , 1_012 )

    def _lowercase ( self ):
        """Reported vocab_size matches the fixture vocabulary."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )

    def _lowercase ( self ):
        """Full tokenize / ids / back-to-tokens pipeline, including accent + OOV handling."""
        _lowerCAmelCase = XLMProphetNetTokenizer(_lowercase , keep_accents=_lowercase )

        _lowerCAmelCase = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        _lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            _lowercase , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        _lowerCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase )
        self.assertListEqual(
            _lowercase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ] , )

        # Unknown pieces ('9', 'é') come back as [UNK] after the round trip.
        _lowerCAmelCase = tokenizer.convert_ids_to_tokens(_lowercase )
        self.assertListEqual(
            _lowercase , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """[UNK]""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """[UNK]""",
                """.""",
            ] , )

    @cached_property
    def _lowercase ( self ):
        """Full pretrained tokenizer used by the @slow integration tests below."""
        return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )

    @slow
    def _lowercase ( self ):
        """Encoding of a short string against known token ids (2 is the EOS id here)."""
        _lowerCAmelCase = """Hello World!"""
        _lowerCAmelCase = [35_389, 6_672, 49, 2]
        self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )

    @slow
    def _lowercase ( self ):
        """Pinned integration encoding for a fixed model revision (padding + attention mask)."""
        # fmt: off
        _lowerCAmelCase = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
5
"""Tests for utils_summarization helpers: padding, story parsing, masks, token-type ids."""
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class __lowerCamelCase ( unittest.TestCase ):
    """Unit tests for the summarization preprocessing utilities.

    NOTE(review): assignment targets were rewritten to bare `lowerCAmelCase_` locals by an
    obfuscation pass, while later statements read the intended names (`self.block_size`,
    `a_`/`expected`) — as written the methods raise NameError or read undefined names.
    Comments describe the recoverable intent.
    """

    def lowerCamelCase ( self : Dict ):
        # setUp: fixed block size used by the truncate_or_pad tests
        # (presumably `self.block_size = 10` — TODO confirm against the original file).
        lowerCAmelCase_ : int = 10

    def lowerCamelCase ( self : List[str] ):
        # Shorter-than-block input is right-padded with the pad value (0).
        lowerCAmelCase_ : Optional[int] = [1, 2, 3, 4]
        lowerCAmelCase_ : str = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(a_ , self.block_size , 0 ) , a_ )

    def lowerCamelCase ( self : List[str] ):
        # Input exactly at block size is returned unchanged.
        lowerCAmelCase_ : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        lowerCAmelCase_ : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(a_ , self.block_size , 0 ) , a_ )

    def lowerCamelCase ( self : Any ):
        # Longer input is truncated down to the block size.
        lowerCAmelCase_ : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        lowerCAmelCase_ : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(a_ , self.block_size , 0 ) , a_ )

    def lowerCamelCase ( self : Optional[int] ):
        # A story with no "@highlight" marker yields an empty summary list.
        lowerCAmelCase_ : Optional[Any] = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = process_story(a_ )
        self.assertEqual(a_ , [] )

    def lowerCamelCase ( self : Dict ):
        # Empty input yields empty story and summary lists.
        lowerCAmelCase_ : Tuple = ""
        lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = process_story(a_ )
        self.assertEqual(a_ , [] )
        self.assertEqual(a_ , [] )

    def lowerCamelCase ( self : int ):
        # Story text is split into sentences; "@highlight" sections become the summary.
        lowerCAmelCase_ : Optional[Any] = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        lowerCAmelCase_ , lowerCAmelCase_ : str = process_story(a_ )

        lowerCAmelCase_ : Union[str, Any] = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(a_ , a_ )

        lowerCAmelCase_ : str = ["It was the best of times."]
        self.assertEqual(a_ , a_ )

    def lowerCamelCase ( self : Union[str, Any] ):
        # No pad tokens present -> mask of all ones.
        lowerCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3, 4] )
        lowerCAmelCase_ : Union[str, Any] = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(a_ , 0 ).numpy() , expected.numpy() )

    def lowerCamelCase ( self : List[Any] ):
        # Trailing pad tokens (value 23) are zeroed in the mask.
        lowerCAmelCase_ : Union[str, Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        lowerCAmelCase_ : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(a_ , 23 ).numpy() , expected.numpy() )

    def lowerCamelCase ( self : List[str] ):
        # Pad value 1: only the trailing run is masked, not earlier occurrences.
        lowerCAmelCase_ : str = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        lowerCAmelCase_ : Tuple = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(a_ , 1 ).numpy() , expected.numpy() )

    def lowerCamelCase ( self : str ):
        # Token-type ids flip at every separator token (101), per row of the batch.
        lowerCAmelCase_ : List[Any] = 1_01
        lowerCAmelCase_ : Optional[Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
        lowerCAmelCase_ : Any = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )

        lowerCAmelCase_ : List[Any] = compute_token_type_ids(a_ , a_ )
        np.testing.assert_array_equal(a_ , a_ )
610
0
"""Open-Llama model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of checkpoint name -> hosted config URL (naming follows the usual
# transformers archive-map convention; the obfuscated original clobbered the
# logger by reusing its name for this dict).
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class _lowercase(PretrainedConfig):
    """Configuration for an Open-Llama model.

    Stores the hyper-parameters of the architecture (vocabulary size, hidden
    sizes, attention/dropout settings, RoPE scaling, ...) and forwards the
    special-token ids to :class:`PretrainedConfig`.
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # "memorry" is the historical (misspelled) kwarg key; keep it so old
        # call sites that still pass it keep working.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: None, or a dict {"type": "linear"|"dynamic", "factor": float > 1}.

        Raises:
            ValueError: if the dict shape, the `type` field or the `factor` field is invalid.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            # Message fixed: the code checks the `type` field, but the original
            # message (with a doubled "with") claimed the field was called `name`.
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
716
'''simple docstring''' import sys import turtle def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> tuple[float, float]: return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> None: my_pen.up() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) if depth == 0: return triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 ) triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 ) triangle(UpperCamelCase , get_mid(UpperCamelCase , UpperCamelCase ) , get_mid(UpperCamelCase , UpperCamelCase ) , depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( '''Correct format for using this script: ''' '''python fractals.py <int:depth_for_fractal>''' ) _A : Any =turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor('''red''') _A : Dict =[(-175, -125), (0, 175), (175, -125)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
631
0
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


lowerCAmelCase = logging.get_logger(__name__)


class A(FeatureExtractionMixin):
    """Sequence feature extractor: adds padding/truncation utilities on top of
    :class:`FeatureExtractionMixin`.

    NOTE(review): the obfuscated original collapsed all parameters and all
    method names to single identifiers (duplicate parameters do not even
    parse).  Names below are restored from the internal call sites
    (``self._pad`` / ``self._truncate`` / ``self._get_padding_strategies`` and
    the attribute reads) — confirm against the project's history.
    """

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        """Store the feature dimension, sampling rate and value used for padding.

        Extra kwargs: ``padding_side`` ("right"/"left") and
        ``return_attention_mask`` (default True) are popped here; everything
        else goes to the mixin.
        """
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features,
        padding=True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """Pad (and optionally truncate) a batch of extracted features to equal length.

        Accepts a dict of lists or a list of dicts/BatchFeatures; returns a
        BatchFeature converted to ``return_tensors`` (inferred from the first
        element when None).
        """
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                # keep outputs in float32 to avoid silently promoting to float64
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features,
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        """Pad a single example (dict of numpy arrays) up to `max_length`."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                # 2-D features pad only along the time axis; 1-D pad directly
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        """Truncate a single example down to `max_length` (no-op unless truncation is set)."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Convert the user-facing `padding` argument into a PaddingStrategy and sanity-check it."""
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
230
def _lowerCamelCase( lowercase__ = 1_0**9 ) -> int: '''simple docstring''' __lowercase= 1 __lowercase= 2 __lowercase= 0 __lowercase= 0 __lowercase= 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value __lowercase= 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(F'{solution() = }')
230
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available UpperCamelCase_ = {"tokenization_herbert": ["HerbertTokenizer"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["HerbertTokenizerFast"] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
708
"""Tests for the PyTorch SwiftFormer model.

NOTE(review): the obfuscated original gave all three classes one duplicate
name and all tester methods another (so later definitions shadowed earlier
ones), and contained illegal annotated tuple assignments.  Names are restored
from the visible call sites (``SwiftFormerModelTester(self)``,
``prepare_img()``, ``self.model_tester.prepare_config_and_inputs`` ...);
the boolean test-flag names follow the common-test conventions — confirm
against project history.
"""
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SwiftFormerModelTester:
    """Builds a tiny SwiftFormer config plus random inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        # Mutable list defaults are kept for interface parity; they are never
        # mutated inside this class.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        # also check the label-free path
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests applied to SwiftFormer."""

    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    # NOTE(review): flag names reconstructed from common-test conventions;
    # the obfuscated original only showed five `= False` assignments.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            emb = model.get_output_embeddings()
            self.assertTrue(emb is None or isinstance(emb, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)

            # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    """Load the standard COCO test fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
599
0
"""Sort the OrderedDict auto-mappings in src/transformers/models/auto alphabetically.

The obfuscated original collapsed the three module constants and both function
names into single identifiers (the second function shadowed the first and then
called an undefined name, and `os.path.join` was applied to the boolean
parameter) — names restored from the __main__ call sites.
"""
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings, e.g. the "bert" in ("bert", ...)
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    """Sort one file's mapping entries in place (overwrite=True) or report.

    Returns True when the file would change and ``overwrite`` is False;
    returns None otherwise.
    """
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # Entries of the mapping are indented 8 spaces past the intro line.
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    """Sort every .py file in the auto module; raise if any needs sorting and overwrite is False."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
33
from copy import deepcopy class __magic_name__ : '''simple docstring''' def __init__( self:int , _a:list[int] | None = None , _a:int | None = None ): if arr is None and size is not None: snake_case__ = size snake_case__ = [0] * size elif arr is not None: self.init(_a ) else: raise ValueError('''Either arr or size must be specified''' ) def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] ): snake_case__ = len(_a ) snake_case__ = deepcopy(_a ) for i in range(1 , self.size ): snake_case__ = self.next_(_a ) if j < self.size: self.tree[j] += self.tree[i] def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): snake_case__ = self.next_(_a ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def SCREAMING_SNAKE_CASE__ ( _a:int ): return index + (index & (-index)) @staticmethod def SCREAMING_SNAKE_CASE__ ( _a:int ): return index - (index & (-index)) def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:int ): if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value snake_case__ = self.next_(_a ) def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ): self.add(_a , value - self.get(_a ) ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ): if right == 0: return 0 snake_case__ = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] snake_case__ = self.prev(_a ) return result def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ): return self.prefix(_a ) - self.prefix(_a ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ): return self.query(_a , index + 1 ) def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ): value -= self.tree[0] if value < 0: return -1 snake_case__ = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 snake_case__ = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
33
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _lowercase (self : Union[str, Any]) -> Optional[Any]: # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __snake_case : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __snake_case : str = DisjunctiveConstraint(_A) self.assertTrue(isinstance(dc.token_ids , _A)) with self.assertRaises(_A): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]])) with self.assertRaises(_A): DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])]) def _lowercase (self : str) -> List[Any]: # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). 
__snake_case : Optional[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A): DisjunctiveConstraint(_A) # fails here def _lowercase (self : Optional[int]) -> Optional[int]: __snake_case : List[Any] = [[1, 2, 3], [1, 2, 4]] __snake_case : Any = DisjunctiveConstraint(_A) __snake_case , __snake_case , __snake_case : int = dc.update(1) __snake_case : Union[str, Any] = stepped is True and completed is False and reset is False self.assertTrue(_A) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) __snake_case , __snake_case , __snake_case : List[str] = dc.update(2) __snake_case : Union[str, Any] = stepped is True and completed is False and reset is False self.assertTrue(_A) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) __snake_case , __snake_case , __snake_case : Optional[Any] = dc.update(3) __snake_case : List[str] = stepped is True and completed is True and reset is False self.assertTrue(_A) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3]) def _lowercase (self : int) -> int: __snake_case : Optional[int] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __snake_case : Optional[Any] = DisjunctiveConstraint(_A) __snake_case , __snake_case , __snake_case : Optional[Any] = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) __snake_case , __snake_case , __snake_case : Dict = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) __snake_case , __snake_case , __snake_case : Tuple = dc.update(4) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2, 4]) __snake_case , __snake_case , __snake_case : List[Any] = dc.update(5) self.assertTrue(dc.completed) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5]) dc.reset() __snake_case , __snake_case , __snake_case : Tuple = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 3) self.assertTrue(dc.current_seq == [1]) __snake_case , __snake_case , __snake_case : str = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 2) self.assertTrue(dc.current_seq == [1, 2]) __snake_case , __snake_case , __snake_case : Dict = dc.update(5) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.remaining() == 0) self.assertTrue(dc.current_seq == [1, 2, 5])
192
"""simple docstring""" import string import numpy def __UpperCAmelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int: '''simple docstring''' return b if a == 0 else greatest_common_divisor(b % a , UpperCAmelCase_ ) class UpperCamelCase : UpperCAmelCase : Any = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) UpperCAmelCase : List[Any] = numpy.vectorize(lambda lowercase : x % 36 ) UpperCAmelCase : Dict = numpy.vectorize(lowercase ) def __init__(self : str , _A : numpy.ndarray) -> None: __snake_case : str = self.modulus(_A) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key __snake_case : Optional[Any] = encrypt_key.shape[0] def _lowercase (self : Any , _A : str) -> int: return self.key_string.index(_A) def _lowercase (self : Union[str, Any] , _A : int) -> str: return self.key_string[round(_A)] def _lowercase (self : Optional[int]) -> None: __snake_case : Any = round(numpy.linalg.det(self.encrypt_key)) if det < 0: __snake_case : Any = det % len(self.key_string) __snake_case : Tuple = len(self.key_string) if greatest_common_divisor(_A , len(self.key_string)) != 1: __snake_case : List[str] = ( f"determinant modular {req_l} of encryption key({det}) " f"is not co prime w.r.t {req_l}.\nTry another key." 
) raise ValueError(_A) def _lowercase (self : Dict , _A : str) -> str: __snake_case : str = [char for char in text.upper() if char in self.key_string] __snake_case : int = chars[-1] while len(_A) % self.break_key != 0: chars.append(_A) return "".join(_A) def _lowercase (self : Union[str, Any] , _A : str) -> str: __snake_case : Any = self.process_text(text.upper()) __snake_case : Dict = '' for i in range(0 , len(_A) - self.break_key + 1 , self.break_key): __snake_case : Dict = text[i : i + self.break_key] __snake_case : List[str] = [self.replace_letters(_A) for char in batch] __snake_case : str = numpy.array([vec]).T __snake_case : List[Any] = self.modulus(self.encrypt_key.dot(_A)).T.tolist()[ 0 ] __snake_case : str = ''.join( self.replace_digits(_A) for num in batch_encrypted) encrypted += encrypted_batch return encrypted def _lowercase (self : Optional[int]) -> numpy.ndarray: __snake_case : List[Any] = round(numpy.linalg.det(self.encrypt_key)) if det < 0: __snake_case : int = det % len(self.key_string) __snake_case : Optional[Any] = None for i in range(len(self.key_string)): if (det * i) % len(self.key_string) == 1: __snake_case : Dict = i break __snake_case : List[str] = ( det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key) ) return self.to_int(self.modulus(_A)) def _lowercase (self : int , _A : str) -> str: __snake_case : int = self.make_decrypt_key() __snake_case : List[str] = self.process_text(text.upper()) __snake_case : str = '' for i in range(0 , len(_A) - self.break_key + 1 , self.break_key): __snake_case : Optional[Any] = text[i : i + self.break_key] __snake_case : Union[str, Any] = [self.replace_letters(_A) for char in batch] __snake_case : Tuple = numpy.array([vec]).T __snake_case : List[str] = self.modulus(decrypt_key.dot(_A)).T.tolist()[0] __snake_case : str = ''.join( self.replace_digits(_A) for num in batch_decrypted) decrypted += decrypted_batch return decrypted def __UpperCAmelCase ( ) -> None: '''simple docstring''' 
__snake_case : List[str] = int(input('Enter the order of the encryption key: ' ) ) __snake_case : str = [] print('Enter each row of the encryption key with space separated integers' ) for _ in range(UpperCAmelCase_ ): __snake_case : Union[str, Any] = [int(UpperCAmelCase_ ) for x in input().split()] hill_matrix.append(UpperCAmelCase_ ) __snake_case : Dict = HillCipher(numpy.array(UpperCAmelCase_ ) ) print('Would you like to encrypt or decrypt some text? (1 or 2)' ) __snake_case : Optional[Any] = input('\n1. Encrypt\n2. Decrypt\n' ) if option == "1": __snake_case : int = input('What text would you like to encrypt?: ' ) print('Your encrypted text is:' ) print(hc.encrypt(UpperCAmelCase_ ) ) elif option == "2": __snake_case : Tuple = input('What text would you like to decrypt?: ' ) print('Your decrypted text is:' ) print(hc.decrypt(UpperCAmelCase_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
192
1
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch `state_dict` file.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: Where to save the converted PyTorch weights.
    """
    # Initialise PyTorch model from the architecture config
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
529
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    """Configuration class for the TrOCR decoder.

    Stores decoder hyper-parameters (vocab size, depth, widths, dropouts, special
    token ids) and forwards the token ids to `PretrainedConfig.__init__`.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )


# Backward-compatible alias for the original (obfuscated) class name.
UpperCAmelCase_ = TrOCRConfig
529
1
"""simple docstring""" from importlib import import_module from .logging import get_logger __a = get_logger(__name__) class lowerCamelCase : '''simple docstring''' def __init__( self: Any , snake_case: List[Any] , snake_case: List[Any]=None ) -> Union[str, Any]: snake_case_ :str = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("""__""" ): setattr(self , snake_case , getattr(snake_case , snake_case ) ) snake_case_ :Optional[int] = module._original_module if isinstance(snake_case , _PatchedModuleObj ) else module class lowerCamelCase : '''simple docstring''' _A : Union[str, Any] = [] def __init__( self: Optional[Any] , snake_case: List[str] , snake_case: str , snake_case: int , snake_case: Dict=None ) -> Any: snake_case_ :Union[str, Any] = obj snake_case_ :List[str] = target snake_case_ :Any = new snake_case_ :Optional[Any] = target.split(""".""" )[0] snake_case_ :Tuple = {} snake_case_ :List[str] = attrs or [] def __enter__( self: Dict ) -> Optional[Any]: *snake_case_, snake_case_ :List[Any] = self.target.split(""".""" ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(snake_case ) ): try: snake_case_ :Optional[int] = import_module(""".""".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): snake_case_ :Optional[Any] = getattr(self.obj , snake_case ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". 
if obj_attr is submodule or ( (isinstance(snake_case , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): snake_case_ :int = obj_attr # patch at top level setattr(self.obj , snake_case , _PatchedModuleObj(snake_case , attrs=self.attrs ) ) snake_case_ :Optional[Any] = getattr(self.obj , snake_case ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(snake_case , snake_case , _PatchedModuleObj(getattr(snake_case , snake_case , snake_case ) , attrs=self.attrs ) ) snake_case_ :int = getattr(snake_case , snake_case ) # finally set the target attribute setattr(snake_case , snake_case , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: snake_case_ :Tuple = getattr(import_module(""".""".join(snake_case ) ) , snake_case ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , snake_case ) is attr_value: snake_case_ :Union[str, Any] = getattr(self.obj , snake_case ) setattr(self.obj , snake_case , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" snake_case_ :Dict = globals()["""__builtins__"""][target_attr] setattr(self.obj , snake_case , self.new ) else: raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self: List[Any] , *snake_case: List[Any] ) -> int: for attr in list(self.original ): setattr(self.obj , snake_case , self.original.pop(snake_case ) ) def lowerCAmelCase_ ( self: List[str] ) -> int: self.__enter__() self._active_patches.append(self ) def lowerCAmelCase_ ( self: int ) -> Optional[Any]: try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
310
"""simple docstring""" import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging __a = logging.get_logger(__name__) class lowerCamelCase : '''simple docstring''' _A : Union[str, Any] = None @experimental def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ): '''simple docstring''' if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) return _map_with_joblib(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[str] = num_proc if num_proc <= len(_lowercase ) else len(_lowercase ) snake_case_ :int = [] # We organize the splits ourselve (contiguous splits) for index in range(_lowercase ): snake_case_ :List[str] = len(_lowercase ) // num_proc snake_case_ :Any = len(_lowercase ) % num_proc snake_case_ :Optional[int] = div * index + min(_lowercase, _lowercase ) snake_case_ :Union[str, Any] = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) ) if len(_lowercase ) != sum(len(i[1] ) for i in split_kwds ): raise ValueError( f"""Error dividing inputs iterable among processes. 
""" f"""Total number of objects {len(_lowercase )}, """ f"""length: {sum(len(i[1] ) for i in split_kwds )}""" ) logger.info( f"""Spawning {num_proc} processes for {len(_lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" ) snake_case_, snake_case_ :Optional[int] = None, None if not disable_tqdm: snake_case_, snake_case_ :List[str] = (RLock(),), tqdm.set_lock with Pool(_lowercase, initargs=_lowercase, initializer=_lowercase ) as pool: snake_case_ :Optional[Any] = pool.map(_lowercase, _lowercase ) logger.info(f"""Finished {num_proc} processes""" ) snake_case_ :Optional[int] = [obj for proc_res in mapped for obj in proc_res] logger.info(f"""Unpacked {len(_lowercase )} objects""" ) return mapped def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ): '''simple docstring''' import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=_lowercase ): return joblib.Parallel()( joblib.delayed(_lowercase )((function, obj, types, None, True, None) ) for obj in iterable ) @experimental @contextlib.contextmanager def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Dict = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: snake_case_ :Optional[int] = None
310
1
"""Processor that wraps a CLIP image processor and a CLIP tokenizer into a single callable."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    """Combines `CLIPImageProcessor` and a CLIP tokenizer into one processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # fall back to the deprecated argument when no image_processor is given
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; returns a single BatchEncoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # merge pixel values into the tokenizer output
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # union of both components' input names, deduplicated while preserving order
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
292
"""Iterative depth-first search over an adjacency-list graph."""
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Return the set of vertices reachable from `start` (depth-first, iterative).

    Differences from BFS:
      1) pop the last element of the stack instead of the first
      2) push adjacent vertices without exploring them immediately
    """
    explored, stack = {start}, [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # reversed so neighbours are visited in their listed order
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
292
1
"""simple docstring""" from abc import ABC, abstractmethod from typing import List, Optional class lowerCamelCase__ ( snake_case ): def __init__( self ): # test for the above condition self.test() def _UpperCamelCase ( self ): UpperCAmelCase = 0 UpperCAmelCase = False while not completed: if counter == 1: self.reset() UpperCAmelCase = self.advance() if not self.does_advance(A ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.update(A ) counter += 1 if counter > 10_000: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def _UpperCamelCase ( self ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _UpperCamelCase ( self ,A ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _UpperCamelCase ( self ,A ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _UpperCamelCase ( self ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _UpperCamelCase ( self ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _UpperCamelCase ( self ,A=False ): raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class lowerCamelCase__ ( snake_case ): def __init__( self ,A ): super(A ,self ).__init__() if not isinstance(A ,A ) or len(A ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(A ,A ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase = token_ids UpperCAmelCase = len(self.token_ids ) UpperCAmelCase = -1 # the index of the currently fulfilled step UpperCAmelCase = False def _UpperCamelCase ( self ): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def _UpperCamelCase ( self ,A ): if not isinstance(A ,A ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def _UpperCamelCase ( self ,A ): if not isinstance(A ,A ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A )}''' ) UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False if self.does_advance(A ): self.fulfilled_idx += 1 UpperCAmelCase = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase = True UpperCAmelCase = completed else: # failed to make progress. 
UpperCAmelCase = True self.reset() return stepped, completed, reset def _UpperCamelCase ( self ): UpperCAmelCase = False UpperCAmelCase = 0 def _UpperCamelCase ( self ): return self.seqlen - (self.fulfilled_idx + 1) def _UpperCamelCase ( self ,A=False ): UpperCAmelCase = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase = self.seqlen UpperCAmelCase = self.fulfilled_idx UpperCAmelCase = self.completed return new_constraint class lowerCamelCase__ : def __init__( self ,A ,A=True ): UpperCAmelCase = max([len(A ) for one in nested_token_ids] ) UpperCAmelCase = {} for token_ids in nested_token_ids: UpperCAmelCase = root for tidx, token_id in enumerate(A ): if token_id not in level: UpperCAmelCase = {} UpperCAmelCase = level[token_id] if no_subsets and self.has_subsets(A ,A ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" F''' {nested_token_ids}.''' ) UpperCAmelCase = root def _UpperCamelCase ( self ,A ): UpperCAmelCase = self.trie for current_token in current_seq: UpperCAmelCase = start[current_token] UpperCAmelCase = list(start.keys() ) return next_tokens def _UpperCamelCase ( self ,A ): UpperCAmelCase = self.next_tokens(A ) return len(A ) == 0 def _UpperCamelCase ( self ,A ): UpperCAmelCase = list(root.values() ) if len(A ) == 0: return 1 else: return sum([self.count_leaves(A ) for nn in next_nodes] ) def _UpperCamelCase ( self ,A ,A ): UpperCAmelCase = self.count_leaves(A ) return len(A ) != leaf_count class lowerCamelCase__ ( snake_case ): def __init__( self ,A ): super(A ,self ).__init__() if not isinstance(A ,A ) or len(A ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(A ,A ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(A ,A ) or token_id < 0) for token_id in token_ids ) for token_ids in 
nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase = DisjunctiveTrie(A ) UpperCAmelCase = nested_token_ids UpperCAmelCase = self.trie.max_height UpperCAmelCase = [] UpperCAmelCase = False def _UpperCamelCase ( self ): UpperCAmelCase = self.trie.next_tokens(self.current_seq ) if len(A ) == 0: return None else: return token_list def _UpperCamelCase ( self ,A ): if not isinstance(A ,A ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A )}''' ) UpperCAmelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def _UpperCamelCase ( self ,A ): if not isinstance(A ,A ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A )}''' ) UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False if self.does_advance(A ): self.current_seq.append(A ) UpperCAmelCase = True else: UpperCAmelCase = True self.reset() UpperCAmelCase = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase = completed return stepped, completed, reset def _UpperCamelCase ( self ): UpperCAmelCase = False UpperCAmelCase = [] def _UpperCamelCase ( self ): if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def _UpperCamelCase ( self ,A=False ): UpperCAmelCase = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase = self.seqlen UpperCAmelCase = self.current_seq UpperCAmelCase = self.completed return new_constraint class lowerCamelCase__ : def __init__( self ,A ): UpperCAmelCase = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase = max([c.seqlen for c in constraints] ) UpperCAmelCase = len(A ) UpperCAmelCase = False self.init_state() def _UpperCamelCase ( self ): UpperCAmelCase = [] UpperCAmelCase = None UpperCAmelCase = [constraint.copy(stateful=A ) 
for constraint in self.constraints] def _UpperCamelCase ( self ): UpperCAmelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def _UpperCamelCase ( self ): UpperCAmelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase = constraint.advance() if isinstance(A ,A ): token_list.append(A ) elif isinstance(A ,A ): token_list.extend(A ) else: UpperCAmelCase = self.inprogress_constraint.advance() if isinstance(A ,A ): token_list.append(A ) elif isinstance(A ,A ): token_list.extend(A ) if len(A ) == 0: return None else: return token_list def _UpperCamelCase ( self ,A ): self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase , UpperCAmelCase = self.add(A ) # the entire list of constraints are fulfilled if self.completed: break def _UpperCamelCase ( self ,A ): if not isinstance(A ,A ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase , UpperCAmelCase = False, False if self.completed: UpperCAmelCase = True UpperCAmelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.inprogress_constraint.update(A ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. 
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A ) ) UpperCAmelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(A ): UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = pending_constraint.update(A ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(A ) UpperCAmelCase = None if not complete and stepped: UpperCAmelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def _UpperCamelCase ( self ,A=True ): UpperCAmelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. 
if stateful: UpperCAmelCase = [ constraint.copy(stateful=A ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase = self.inprogress_constraint.copy(stateful=A ) UpperCAmelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
74
"""simple docstring""" from __future__ import annotations def _a ( _snake_case ): """simple docstring""" return len(set(_snake_case ) ) == len(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
74
1
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowercase_ = logging.get_logger(__name__) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): def constraint_to_multiple_of(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=None ): lowercase__ = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowercase__ = math.floor(val / multiple ) * multiple if x < min_val: lowercase__ = math.ceil(val / multiple ) * multiple return x lowercase__ = (output_size, output_size) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else output_size lowercase__ , lowercase__ = get_image_size(SCREAMING_SNAKE_CASE_ ) lowercase__ , lowercase__ = output_size # determine new height and width lowercase__ = output_height / input_height lowercase__ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowercase__ = scale_width else: # fit height lowercase__ = scale_height lowercase__ = constraint_to_multiple_of(scale_height * input_height , multiple=SCREAMING_SNAKE_CASE_ ) lowercase__ = constraint_to_multiple_of(scale_width * input_width , multiple=SCREAMING_SNAKE_CASE_ ) return (new_height, new_width) class _snake_case ( lowercase__): UpperCamelCase__ : Tuple =["""pixel_values"""] 
def __init__( self : Any, __lowercase : bool = True, __lowercase : Dict[str, int] = None, __lowercase : PILImageResampling = PILImageResampling.BILINEAR, __lowercase : bool = False, __lowercase : int = 1, __lowercase : bool = True, __lowercase : Union[int, float] = 1 / 255, __lowercase : bool = True, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[float, List[float]]] = None, **__lowercase : List[Any], ): super().__init__(**__lowercase ) lowercase__ = size if size is not None else {"height": 384, "width": 384} lowercase__ = get_size_dict(__lowercase ) lowercase__ = do_resize lowercase__ = size lowercase__ = keep_aspect_ratio lowercase__ = ensure_multiple_of lowercase__ = resample lowercase__ = do_rescale lowercase__ = rescale_factor lowercase__ = do_normalize lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def A__ ( self : List[Any], __lowercase : np.ndarray, __lowercase : Dict[str, int], __lowercase : bool = False, __lowercase : int = 1, __lowercase : PILImageResampling = PILImageResampling.BICUBIC, __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : Union[str, Any], ): lowercase__ = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) lowercase__ = get_resize_output_image_size( __lowercase, output_size=(size["height"], size["width"]), keep_aspect_ratio=__lowercase, multiple=__lowercase, ) return resize(__lowercase, size=__lowercase, resample=__lowercase, data_format=__lowercase, **__lowercase ) def A__ ( self : str, __lowercase : np.ndarray, __lowercase : Union[int, float], __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : List[Any], ): return rescale(__lowercase, scale=__lowercase, data_format=__lowercase, **__lowercase ) def A__ ( self : Any, __lowercase : np.ndarray, __lowercase : Union[float, List[float]], __lowercase : Union[float, List[float]], __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : Optional[Any], ): return normalize(__lowercase, mean=__lowercase, std=__lowercase, data_format=__lowercase, **__lowercase ) def A__ ( self : List[str], __lowercase : ImageInput, __lowercase : bool = None, __lowercase : int = None, __lowercase : bool = None, __lowercase : int = None, __lowercase : PILImageResampling = None, __lowercase : bool = None, __lowercase : float = None, __lowercase : bool = None, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[str, TensorType]] = None, __lowercase : ChannelDimension = ChannelDimension.FIRST, **__lowercase : Tuple, ): lowercase__ = do_resize if do_resize is not None else self.do_resize lowercase__ = size if size is not None else self.size lowercase__ = get_size_dict(__lowercase ) lowercase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowercase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowercase__ = resample if resample is not None else self.resample lowercase__ = do_rescale if do_rescale is not None else self.do_rescale lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor 
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize lowercase__ = image_mean if image_mean is not None else self.image_mean lowercase__ = image_std if image_std is not None else self.image_std lowercase__ = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. lowercase__ = [to_numpy_array(__lowercase ) for image in images] if do_resize: lowercase__ = [self.resize(image=__lowercase, size=__lowercase, resample=__lowercase ) for image in images] if do_rescale: lowercase__ = [self.rescale(image=__lowercase, scale=__lowercase ) for image in images] if do_normalize: lowercase__ = [self.normalize(image=__lowercase, mean=__lowercase, std=__lowercase ) for image in images] lowercase__ = [to_channel_dimension_format(__lowercase, __lowercase ) for image in images] lowercase__ = {"pixel_values": images} return BatchFeature(data=__lowercase, tensor_type=__lowercase ) def A__ ( self : int, __lowercase : Optional[Any], __lowercase : List[Tuple] = None ): lowercase__ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__lowercase ) != len(__lowercase ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(__lowercase ): lowercase__ = target_sizes.numpy() lowercase__ = [] for idx in range(len(__lowercase ) ): lowercase__ = 
torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="bilinear", align_corners=__lowercase ) lowercase__ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__lowercase ) else: lowercase__ = logits.argmax(dim=1 ) lowercase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
413
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { """configuration_table_transformer""": [ """TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TableTransformerConfig""", """TableTransformerOnnxConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TableTransformerForObjectDetection""", """TableTransformerModel""", """TableTransformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_table_transformer import ( TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TableTransformerConfig, TableTransformerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_table_transformer import ( TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TableTransformerForObjectDetection, TableTransformerModel, TableTransformerPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
413
1
from statistics import mean, stdev def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ = 3 ): A : Optional[int] = min(lowerCamelCase_ ) A : Dict = max(lowerCamelCase_ ) # normalize data return [round((x - x_min) / (x_max - x_min) , lowerCamelCase_ ) for x in data] def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ = 3 ): A : Union[str, Any] = mean(lowerCamelCase_ ) A : str = stdev(lowerCamelCase_ ) # standardize data return [round((x - mu) / (sigma) , lowerCamelCase_ ) for x in data]
423
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowercase : Union[str, Any] = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"]) def snake_case__ ( lowerCamelCase_ ): A : Optional[Any] = test_results.split(''' ''' ) A : List[str] = 0 A : str = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. A : Optional[int] = expressions[-2] if '''=''' in expressions[-1] else expressions[-1] for i, expression in enumerate(lowerCamelCase_ ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def snake_case__ ( lowerCamelCase_ ): A : Any = {} A : List[str] = None A : Dict = False for line in failures_short_lines.split('''\n''' ): if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ): A : int = True A : List[str] = line.split(''' ''' )[2] elif in_error and not line.split(''' ''' )[0].isdigit(): A : Optional[int] = line A : int = False return failures class __lowercase : """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: A : Tuple = title A : List[Any] = doc_test_results['''time_spent'''].split(''',''' )[0] A : List[Any] = doc_test_results['''success'''] A : Optional[Any] = doc_test_results['''failures'''] A : str = self.n_success + self.n_failures # Failures and success of the modeling tests A : Optional[int] = doc_test_results @property def snake_case ( self ) -> str: A : List[str] = [self._time_spent] A : Dict = 0 for time in time_spent: A : Optional[Any] = time.split(''':''' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. 
if len(__UpperCAmelCase ) == 1: A : List[str] = [0, 0, time_parts[0]] A , A , A : Union[str, Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 36_00 + minutes * 60 + seconds A , A , A : Union[str, Any] = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60 return f'{int(__UpperCAmelCase )}h{int(__UpperCAmelCase )}m{int(__UpperCAmelCase )}s' @property def snake_case ( self ) -> Dict: return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def snake_case ( self ) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def snake_case ( self ) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": ( f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in' f' {self.time}.' 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def snake_case ( self ) -> Dict: A : Optional[int] = 40 A : Dict = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(__UpperCAmelCase , __UpperCAmelCase )} A : List[str] = '''''' for category, failures in category_failures.items(): if len(__UpperCAmelCase ) == 0: continue if report != "": report += "\n\n" report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(__UpperCAmelCase ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": f'The following examples had failures:\n\n\n{report}\n', }, } @property def snake_case ( self ) -> str: A : Any = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(__UpperCAmelCase ) @staticmethod def snake_case ( ) -> Dict: A : int = [ { '''type''': '''section''', '''text''': { '''type''': '''plain_text''', '''text''': '''There was an issue running the tests.''', }, '''accessory''': { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True}, '''url''': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } ] print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(__UpperCAmelCase )} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=__UpperCAmelCase , ) def snake_case ( self ) -> Optional[Any]: print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(self.payload )} 
) ) A : Dict = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else '''All tests passed.''' A : Any = client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=__UpperCAmelCase , ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: A : Optional[int] = '''''' for key, value in failures.items(): A : Optional[int] = value[:2_00] + ''' [Truncated]''' if len(__UpperCAmelCase ) > 2_50 else value failures_text += f'*{key}*\n_{value}_\n\n' A : Optional[Any] = job_name A : List[str] = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}} if job_link is not None: A : Union[str, Any] = { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True}, '''url''': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def snake_case ( self ) -> str: if self.thread_ts is None: raise ValueError('''Can only post reply if a post has been made.''' ) A : Union[str, Any] = self.doc_test_results.pop('''job_link''' ) self.doc_test_results.pop('''failures''' ) self.doc_test_results.pop('''success''' ) self.doc_test_results.pop('''time_spent''' ) A : str = sorted(self.doc_test_results.items() , key=lambda __UpperCAmelCase : t[0] ) for job, job_result in sorted_dict: if len(job_result['''failures'''] ): A : List[str] = f'*Num failures* :{len(job_result["failed"] )} \n' A : Union[str, Any] = job_result['''failures'''] A : str = self.get_reply_blocks(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , text=__UpperCAmelCase ) print('''Sending the following reply''' ) print(json.dumps({'''blocks''': blocks} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=f'Results for {job}' , 
blocks=__UpperCAmelCase , thread_ts=self.thread_ts['''ts'''] , ) time.sleep(1 ) def snake_case__ ( ): A : Optional[int] = os.environ['''GITHUB_RUN_ID'''] A : Optional[Any] = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100' A : Any = requests.get(lowerCamelCase_ ).json() A : Optional[Any] = {} try: jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) A : Optional[int] = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(lowerCamelCase_ ): A : str = requests.get(url + F'&page={i + 2}' ).json() jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return jobs except Exception as e: print('''Unknown error, could not fetch links.''' , lowerCamelCase_ ) return {} def snake_case__ ( lowerCamelCase_ ): A : Any = {} if os.path.exists(lowerCamelCase_ ): A : Tuple = os.listdir(lowerCamelCase_ ) for file in files: try: with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f: A : Any = f.read() except UnicodeDecodeError as e: raise ValueError(F'Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}.' 
) from e return _artifact def snake_case__ ( ): class __lowercase : """simple docstring""" def __init__( self , __UpperCAmelCase ) -> Tuple: A : Dict = name A : Dict = [] def __str__( self ) -> int: return self.name def snake_case ( self , __UpperCAmelCase ) -> List[str]: self.paths.append({'''name''': self.name, '''path''': path} ) A : Dict[str, Artifact] = {} A : Any = filter(os.path.isdir , os.listdir() ) for directory in directories: A : Optional[int] = directory if artifact_name not in _available_artifacts: A : Any = Artifact(lowerCamelCase_ ) _available_artifacts[artifact_name].add_path(lowerCamelCase_ ) return _available_artifacts if __name__ == "__main__": lowercase : Dict = get_job_links() lowercase : Union[str, Any] = retrieve_available_artifacts() lowercase : Any = collections.OrderedDict( [ ("*.py", "API Examples"), ("*.md", "MD Examples"), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowercase : int = { v: { "failed": [], "failures": {}, } for v in docs.values() } # Link to the GitHub Action job lowercase : Union[str, Any] = github_actions_job_links.get("run_doctests") lowercase : List[str] = available_artifacts["doc_tests_gpu_test_reports"].paths[0] lowercase : Optional[Any] = retrieve_artifact(artifact_path["name"]) if "stats" in artifact: lowercase , lowercase , lowercase : Union[str, Any] = handle_test_results(artifact["stats"]) lowercase : Optional[int] = failed lowercase : str = success lowercase : int = time_spent[1:-1] + ", " lowercase : Union[str, Any] = extract_first_line_failure(artifact["failures_short"]) for line in artifact["summary_short"].split("\n"): if re.search("FAILED", line): lowercase : Union[str, Any] = line.replace("FAILED ", "") lowercase : Any = line.split()[0].replace("\n", "") if "::" in line: lowercase , lowercase : Tuple = line.split("::") else: lowercase , lowercase : int = line, line for 
file_regex in docs.keys(): if fnmatch(file_path, file_regex): lowercase : Tuple = docs[file_regex] doc_test_results[category]["failed"].append(test) lowercase : List[Any] = all_failures[test] if test in all_failures else "N/A" lowercase : List[str] = failure break lowercase : Optional[int] = Message("🤗 Results of the doc tests.", doc_test_results) message.post() message.post_reply()
423
1
import math import sys def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : Any = '''''' try: with open(_UpperCamelCase , '''rb''' ) as binary_file: snake_case_ : Dict = binary_file.read() for dat in data: snake_case_ : Union[str, Any] = f'''{dat:08b}''' result += curr_byte return result except OSError: print('''File not accessible''' ) sys.exit() def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : Any = {'''0''': '''0''', '''1''': '''1'''} snake_case_ , snake_case_ : List[Any] = '''''', '''''' snake_case_ : List[str] = len(_UpperCamelCase ) for i in range(len(_UpperCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue snake_case_ : List[str] = lexicon[curr_string] result += last_match_id snake_case_ : int = last_match_id + '''0''' if math.loga(_UpperCamelCase ).is_integer(): snake_case_ : int = {} for curr_key in list(_UpperCamelCase ): snake_case_ : Tuple = lexicon.pop(_UpperCamelCase ) snake_case_ : Optional[int] = new_lex snake_case_ : str = last_match_id + '''1''' index += 1 snake_case_ : Tuple = '''''' return result def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> None: """simple docstring""" snake_case_ : Optional[Any] = 8 try: with open(_UpperCamelCase , '''wb''' ) as opened_file: snake_case_ : Tuple = [ to_write[i : i + byte_length] for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('''10000000''' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder='''big''' ) ) except OSError: print('''File not accessible''' ) sys.exit() def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : Union[str, Any] = 0 for letter in data_bits: if letter == "1": break counter += 1 snake_case_ : Union[str, Any] = 
data_bits[counter:] snake_case_ : Optional[int] = data_bits[counter + 1 :] return data_bits def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> None: """simple docstring""" snake_case_ : Optional[Any] = read_file_binary(_UpperCamelCase ) snake_case_ : int = remove_prefix(_UpperCamelCase ) snake_case_ : Dict = decompress_data(_UpperCamelCase ) write_file_binary(_UpperCamelCase , _UpperCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
60
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __magic_name__ (unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = 0 def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:str ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = 
CLIPConfig() # Create a dummy config file with image_proceesor_type snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict() config_dict.pop('''image_processor_type''' ) snake_case__ = CLIPImageProcessor(**_a ) # save in new folder model_config.save_pretrained(_a ) config.save_pretrained(_a ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) # make sure private variable is not incorrectly saved snake_case__ = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:List[str] ): with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) snake_case__ = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): with self.assertRaisesRegex( _a , '''clip-base is not a local folder and is not a valid model identifier''' ): snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): with self.assertRaisesRegex( _a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): with self.assertRaisesRegex( _a , '''hf-internal-testing/config-no-model does not appear to have a file named 
preprocessor_config.json.''' , ): snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_a ): snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_a ): snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_a ) snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def SCREAMING_SNAKE_CASE__ ( self:Dict ): try: AutoConfig.register('''custom''' , _a ) AutoImageProcessor.register(_a , _a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_a ): AutoImageProcessor.register(_a , _a ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ = Path(_a ) / '''preprocessor_config.json''' snake_case__ = Path(_a ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) snake_case__ = CustomImageProcessor.from_pretrained(_a ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_a ) snake_case__ = 
AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : List[str] = True try: AutoConfig.register('''custom''' , _a ) AutoImageProcessor.register(_a , _a ) # If remote code is not set, the default is to use local snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub snake_case__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(_a , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
33
0
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,): '''simple docstring''' A_ : Optional[Any] = parent A_ : str = batch_size A_ : int = seq_length A_ : Union[str, Any] = is_training A_ : Optional[Any] = use_token_type_ids A_ : int = use_labels A_ : Dict = vocab_size A_ : List[Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : int = intermediate_size A_ : Tuple = hidden_act A_ : int = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Tuple = type_sequence_label_size A_ : int = initializer_range A_ : Optional[Any] = num_labels A_ : str = num_choices A_ : Optional[Any] = scope A_ : List[Any] = self.vocab_size - 1 def _a ( self : Any ): '''simple docstring''' A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : List[Any] = None if 
self.use_token_type_ids: A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : int = None A_ : str = None A_ : Union[str, Any] = None if self.use_labels: A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Any = ids_tensor([self.batch_size] ,self.num_choices ) A_ : List[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,) A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ): '''simple docstring''' A_ : Optional[Any] = OpenAIGPTModel(config=_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a ) A_ : str = model(_a ,token_type_ids=_a ) A_ : Dict = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ): '''simple docstring''' A_ : str = OpenAIGPTLMHeadModel(_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ): '''simple docstring''' A_ : Any = OpenAIGPTDoubleHeadsModel(_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape 
,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ): '''simple docstring''' A_ : List[str] = self.num_labels A_ : int = OpenAIGPTForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = self.prepare_config_and_inputs() ( A_ ) : str = config_and_inputs A_ : int = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) a_ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly a_ = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ): '''simple docstring''' A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": A_ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,) A_ : Any = inputs_dict["""labels"""] A_ : Any = inputs_dict["""labels"""] A_ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,) A_ : int = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=_a ) return inputs_dict def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Tuple = OpenAIGPTModelTester(self ) A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 ) def _a ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_a ) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a ) @slow def _a ( self : List[Any] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def _a ( self : 
List[str] ): '''simple docstring''' A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(_a ) A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is A_ : Dict = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the A_ : int = model.generate(_a ,do_sample=_a ) self.assertListEqual(output_ids[0].tolist() ,_a )
708
'''simple docstring''' from ... import PretrainedConfig __magic_name__ = { 'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json', } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP a_ = """nezha""" def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,): '''simple docstring''' super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a ) A_ : Tuple = vocab_size A_ : int = hidden_size A_ : Any = num_hidden_layers A_ : List[Any] = num_attention_heads A_ : Tuple = hidden_act A_ : List[Any] = intermediate_size A_ : List[str] = hidden_dropout_prob A_ : Tuple = attention_probs_dropout_prob A_ : Dict = max_position_embeddings A_ : Optional[Any] = max_relative_position A_ : List[Any] = type_vocab_size A_ : int = initializer_range A_ : Tuple = layer_norm_eps A_ : Dict = classifier_dropout A_ : int = use_cache
27
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging snake_case = logging.get_logger(__name__) snake_case = { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class A_ ( UpperCAmelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small''' SCREAMING_SNAKE_CASE_ : int = ['''past_key_values'''] SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple: _lowercase = vocab_size _lowercase = max_position_embeddings _lowercase = d_model _lowercase = encoder_ffn_dim _lowercase = encoder_layers _lowercase = encoder_attention_heads _lowercase = decoder_ffn_dim _lowercase = decoder_layers _lowercase = decoder_attention_heads _lowercase = dropout _lowercase = attention_dropout _lowercase = activation_dropout _lowercase = activation_function _lowercase = init_std _lowercase = encoder_layerdrop _lowercase = decoder_layerdrop _lowercase = use_cache _lowercase = encoder_layers _lowercase = scale_embedding # 
scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,) class A_ ( UpperCAmelCase ): """simple docstring""" @property def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowercase = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: _lowercase = {0: 'batch'} _lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: _lowercase = {0: 'batch', 1: 'decoder_sequence'} _lowercase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__A ,direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. _lowercase = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: _lowercase , _lowercase = self.num_layers for i in range(__A ): _lowercase = {0: 'batch', 2: 'past_sequence + sequence'} _lowercase = {0: 'batch', 2: 'past_sequence + sequence'} else: _lowercase = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _lowercase = super().outputs else: _lowercase = super(__A ,self ).outputs if self.use_past: _lowercase , _lowercase = self.num_layers for i in range(__A ): _lowercase = {0: 'batch', 2: 'past_sequence + sequence'} _lowercase = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A 
: int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]: _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __A ,__A ,__A ,__A ,__A ) # Generate decoder inputs _lowercase = seq_length if not self.use_past else 1 _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __A ,__A ,__A ,__A ,__A ) _lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} _lowercase = dict(**__A ,**__A ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _lowercase , _lowercase = common_inputs['input_ids'].shape _lowercase = common_inputs['decoder_input_ids'].shape[1] _lowercase , _lowercase = self.num_attention_heads _lowercase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowercase = decoder_seq_length + 3 _lowercase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _lowercase = torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 ) _lowercase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _lowercase , _lowercase = self.num_layers _lowercase = min(__A ,__A ) _lowercase = max(__A ,__A ) - min_num_layers _lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(__A ): common_inputs["past_key_values"].append( ( torch.zeros(__A ), torch.zeros(__A ), torch.zeros(__A ), torch.zeros(__A ), ) ) # TODO: test this. 
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(__A ,__A ): common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) ) return common_inputs def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]: _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __A ,__A ,__A ,__A ,__A ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _lowercase , _lowercase = common_inputs['input_ids'].shape # Not using the same length for past_key_values _lowercase = seqlen + 2 _lowercase , _lowercase = self.num_layers _lowercase , _lowercase = self.num_attention_heads _lowercase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _lowercase = common_inputs['attention_mask'].dtype _lowercase = torch.cat( [common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 ) _lowercase = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A ) ] return common_inputs def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _lowercase = compute_effective_axis_dimension( __A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _lowercase = tokenizer.num_special_tokens_to_add(__A ) _lowercase = compute_effective_axis_dimension( __A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A ) # Generate dummy inputs according to compute batch and sequence _lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size _lowercase = dict(tokenizer(__A ,return_tensors=__A ) ) return common_inputs def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A ) elif self.task == "causal-lm": _lowercase = self._generate_dummy_inputs_for_causal_lm( __A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A ) else: _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A ) return common_inputs def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]: if self.task in ["default", "seq2seq-lm"]: _lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A ) else: _lowercase = super(__A ,self )._flatten_past_key_values_( __A ,__A ,__A ,__A )
67
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class lowercase_ ( _A ): a_ = """""" a_ = """hf-legacy""" # "hf://"" is reserved for hffs def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Tuple: """simple docstring""" super().__init__(self , **UpperCamelCase__ ) UpperCAmelCase_ = repo_info UpperCAmelCase_ = token UpperCAmelCase_ = None def lowerCamelCase_ ( self ) -> List[Any]: """simple docstring""" if self.dir_cache is None: UpperCAmelCase_ = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes UpperCAmelCase_ = { "name": hf_file.rfilename, "size": None, "type": "file", } self.dir_cache.update( { str(UpperCamelCase__ ): {"name": str(UpperCamelCase__ ), "size": None, "type": "directory"} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = "rb" , **UpperCamelCase__ , ) -> Optional[int]: """simple docstring""" if not isinstance(self.repo_info , UpperCamelCase__ ): raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" ) UpperCAmelCase_ = hf_hub_url(self.repo_info.id , UpperCamelCase__ , revision=self.repo_info.sha ) return fsspec.open( UpperCamelCase__ , mode=UpperCamelCase__ , headers=get_authentication_headers_for_url(UpperCamelCase__ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open() def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]: """simple docstring""" self._get_dirs() UpperCAmelCase_ = self._strip_protocol(UpperCamelCase__ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(UpperCamelCase__ ) def lowerCamelCase_ ( self , UpperCamelCase__ 
, UpperCamelCase__=False , **UpperCamelCase__ ) -> str: """simple docstring""" self._get_dirs() UpperCAmelCase_ = PurePosixPath(path.strip("/" ) ) UpperCAmelCase_ = {} for p, f in self.dir_cache.items(): UpperCAmelCase_ = PurePosixPath(p.strip("/" ) ) UpperCAmelCase_ = p.parent if root == path: UpperCAmelCase_ = f UpperCAmelCase_ = list(paths.values() ) if detail: return out else: return sorted(f["name"] for f in out )
660
0
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def UpperCamelCase ( __lowercase : Optional[Any] ): '''simple docstring''' if isinstance(_A ,collections.abc.Iterable ): return x return (x, x) @require_tf class UpperCAmelCase : '''simple docstring''' def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__lowerCamelCase , __lowerCamelCase ) A_ : List[str] = TFVisionTextDualEncoderModel(__lowerCamelCase ) A_ : Optional[int] = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCAmelCase_ ( self , lowercase , 
lowercase , lowercase , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ : Optional[Any] = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase ) A_ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=__lowerCamelCase , text_model=__lowerCamelCase ) A_ : Optional[int] = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ : int = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase ) A_ : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model} A_ : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowerCamelCase ) A_ : Any = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ : Dict = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase ) A_ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=__lowerCamelCase , text_model=__lowerCamelCase ) A_ : Tuple = model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase ) A_ : List[Any] = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCamelCase ) A_ : List[str] = TFVisionTextDualEncoderModel.from_pretrained(__lowerCamelCase ) A_ : List[str] = 
model(input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase ) A_ : List[str] = after_output[0].numpy() A_ : Tuple = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowerCamelCase , 1E-5 ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ : Union[str, Any] = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase ) A_ : int = TFVisionTextDualEncoderModel(vision_model=__lowerCamelCase , text_model=__lowerCamelCase ) A_ : List[str] = model( input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , output_attentions=__lowerCamelCase ) A_ : Dict = output.vision_model_output.attentions self.assertEqual(len(__lowerCamelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) A_ : Optional[int] = to_atuple(vision_model.config.image_size ) A_ : Dict = to_atuple(vision_model.config.patch_size ) A_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) A_ : Optional[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) A_ : List[Any] = output.text_model_output.attentions self.assertEqual(len(__lowerCamelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : str = np.abs((a - b) ).max() self.assertLessEqual(__lowerCamelCase , __lowerCamelCase , F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**__lowerCamelCase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : 
List[str] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__lowerCamelCase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__lowerCamelCase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.prepare_config_and_inputs() self.check_save_load(**__lowerCamelCase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__lowerCamelCase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = self.get_pretrained_model_and_inputs() A_ : Tuple = model_a(**__lowerCamelCase ) A_ : int = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__lowerCamelCase ) A_ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(__lowerCamelCase ) A_ : Tuple = model_a(**__lowerCamelCase ) A_ : Optional[int] = after_outputs[0].numpy() A_ : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowerCamelCase , 1E-5 ) @require_tf class UpperCAmelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' ) A_ : Optional[int] = 1_3 A_ : List[str] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) A_ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) A_ : List[Any] = random_attention_mask([batch_size, 4] ) A_ : List[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : str = TFViTModel(__lowerCamelCase , 
name='vision_model' ) A_ : int = TFBertModel(__lowerCamelCase , name='text_model' ) return vision_model, text_model def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = TFViTModelTester(self ) A_ : int = TFBertModelTester(self ) A_ : Union[str, Any] = vit_model_tester.prepare_config_and_inputs() A_ : str = bert_model_tester.prepare_config_and_inputs() A_ : List[str] = vision_config_and_inputs ( A_ ) : Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class UpperCAmelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' ) A_ : List[Any] = 1_3 A_ : List[str] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) A_ : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) A_ : Optional[Any] = random_attention_mask([batch_size, 4] ) A_ : Dict = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ : int = self.get_vision_text_model(__lowerCamelCase , __lowerCamelCase ) A_ : List[Any] = TFVisionTextDualEncoderModel(vision_model=__lowerCamelCase , text_model=__lowerCamelCase ) A_ : List[Any] = model( input_ids=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , output_attentions=__lowerCamelCase ) A_ : List[str] = 
output.vision_model_output.attentions self.assertEqual(len(__lowerCamelCase ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) A_ : Tuple = to_atuple(vision_model.config.image_size ) A_ : Tuple = to_atuple(vision_model.config.patch_size ) A_ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) A_ : List[Any] = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) A_ : Optional[int] = output.text_model_output.attentions self.assertEqual(len(__lowerCamelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : Optional[int] = TFDeiTModel(__lowerCamelCase , name='vision_model' ) A_ : Optional[int] = TFRobertaModel(__lowerCamelCase , name='text_model' ) return vision_model, text_model def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = TFDeiTModelTester(self ) A_ : List[Any] = TFRobertaModelTester(self ) A_ : int = vit_model_tester.prepare_config_and_inputs() A_ : Dict = bert_model_tester.prepare_config_and_inputs() A_ : str = vision_config_and_inputs ( A_ ) : Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class UpperCAmelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-clip-tf' , 
'hf-internal-testing/tiny-random-bert' ) A_ : Any = 1_3 A_ : int = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) A_ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) A_ : int = random_attention_mask([batch_size, 4] ) A_ : Any = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : Tuple = TFCLIPVisionModel(__lowerCamelCase , name='vision_model' ) A_ : Union[str, Any] = TFBertModel(__lowerCamelCase , name='text_model' ) return vision_model, text_model def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = TFCLIPVisionModelTester(self ) A_ : Optional[int] = TFBertModelTester(self ) A_ : List[str] = clip_model_tester.prepare_config_and_inputs() A_ : List[Any] = bert_model_tester.prepare_config_and_inputs() A_ : Union[str, Any] = vision_config_and_inputs ( A_ ) : Optional[int] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = TFVisionTextDualEncoderModel.from_pretrained( 'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=__lowerCamelCase ) A_ : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) A_ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) A_ : int = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=__lowerCamelCase , 
padding=__lowerCamelCase , return_tensors='np' ) A_ : Tuple = model(**__lowerCamelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) A_ : Optional[Any] = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __lowerCamelCase , atol=1E-3 ) )
705
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _UpperCAmelCase = logging.get_logger(__name__) # General docstring _UpperCAmelCase = """RegNetConfig""" # Base docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = [1, 1088, 7, 7] # Image classification docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = """tabby, tabby cat""" _UpperCAmelCase = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ): """simple docstring""" super().__init__(**lowercase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , ) A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : List[str] = self.convolution(self.padding(lowercase ) ) A_ : List[str] = 
self.normalization(lowercase ) A_ : List[Any] = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[int] = config.num_channels A_ : str = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = shape_list(lowercase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) ) A_ : Optional[int] = self.embedder(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' ) A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def lowerCAmelCase_ ( self , lowercase , lowercase = False ): """simple docstring""" return self.normalization(self.convolution(lowercase ) , training=lowercase ) class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) A_ : Optional[Any] = [ 
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : int = self.pooler(lowercase ) for layer_module in self.attention: A_ : Optional[Any] = layer_module(lowercase ) A_ : Optional[int] = hidden_state * pooled return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : Optional[int] = max(1 , out_channels // config.groups_width ) A_ : List[Any] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
A_ : Optional[int] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ), ] A_ : List[str] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Union[str, Any] = hidden_state for layer_module in self.layers: A_ : int = layer_module(lowercase ) A_ : Union[str, Any] = self.shortcut(lowercase ) hidden_state += residual A_ : Dict = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : int = max(1 , out_channels // config.groups_width ) A_ : Optional[int] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) A_ : List[str] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ), ] A_ : Union[str, Any] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = hidden_state for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) A_ : int = self.shortcut(lowercase ) hidden_state += residual A_ : str = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def 
__init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer A_ : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ), *[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : List[str] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) ) def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ): """simple docstring""" A_ : Tuple = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: A_ : Dict = hidden_states + (hidden_state,) A_ : List[Any] = stage_module(lowercase ) if output_hidden_states: A_ : Union[str, Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase ) 
@keras_serializable class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' lowerCamelCase_ = RegNetConfig def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[Any] = config A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' ) A_ : str = TFRegNetEncoder(lowercase , name='encoder' ) A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) @unpack_inputs def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ): """simple docstring""" A_ : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase ) A_ : Optional[int] = self.encoder( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Dict = encoder_outputs[0] A_ : List[Any] = self.pooler(lowercase ) # Change to NCHW output format have uniformity in the modules A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: A_ : int = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = RegNetConfig lowerCamelCase_ = '''regnet''' lowerCamelCase_ = '''pixel_values''' @property def lowerCAmelCase_ ( self ): """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, 
self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} _UpperCAmelCase = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , __A , ) class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : int = TFRegNetMainLayer(lowercase , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : Tuple = self.regnet( pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , __A , ) class UpperCAmelCase ( __A , __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : List[Any] = config.num_labels A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' ) # classification head A_ : Union[str, Any] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : List[Any] = self.regnet( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] A_ : List[Any] = self.classifier[0](lowercase ) A_ : Union[str, Any] = self.classifier[1](lowercase ) A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase ) if not return_dict: A_ : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
70
0
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowercase : Optional[Any] =get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp __lowercase : List[str] =5 __lowercase : int =10 @require_sentencepiece @require_tokenizers class A ( __lowercase , unittest.TestCase ): _snake_case =SpeechaTextTokenizer _snake_case =False _snake_case =True def lowerCAmelCase__ ( self: List[str] ) -> Any: '''simple docstring''' super().setUp() UpperCAmelCase_ =sp.SentencePieceProcessor() spm_model.Load(_lowerCAmelCase ) UpperCAmelCase_ =["<s>", "<pad>", "</s>", "<unk>"] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_lowerCAmelCase ) )] UpperCAmelCase_ =dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) UpperCAmelCase_ =Path(self.tmpdirname ) save_json(_lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["spm_file"] ) UpperCAmelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self: Any ) -> Dict: '''simple docstring''' UpperCAmelCase_ ="<pad>" UpperCAmelCase_ =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase ) def lowerCAmelCase__ ( self: List[Any] ) -> int: '''simple docstring''' UpperCAmelCase_ =list(self.get_tokenizer().get_vocab().keys() ) 
self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(_lowerCAmelCase ) , 1001 ) def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1001 ) def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) UpperCAmelCase_ =tokenizer.tokenize("This is a test" ) self.assertListEqual(_lowerCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [289, 50, 14, 174, 386] , ) UpperCAmelCase_ =tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( _lowerCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) UpperCAmelCase_ =tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) UpperCAmelCase_ =tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ ={"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 
93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , ) @require_sentencepiece class A ( unittest.TestCase ): _snake_case ='''valhalla/s2t_mustc_multilinguial_medium''' _snake_case ='''C\'est trop cool''' _snake_case ='''Esto es genial''' @classmethod def 
lowerCAmelCase__ ( cls: str ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ =SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def lowerCAmelCase__ ( self: str ) -> List[str]: '''simple docstring''' self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 ) def lowerCAmelCase__ ( self: Any ) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.tokenizer.vocab_size , 1_0000 ) def lowerCAmelCase__ ( self: str ) -> int: '''simple docstring''' self.assertIn(_lowerCAmelCase , self.tokenizer.all_special_ids ) UpperCAmelCase_ =[ES_CODE, 4, 1601, 47, 7647, 2] UpperCAmelCase_ =self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) UpperCAmelCase_ =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , _lowerCAmelCase ) def lowerCAmelCase__ ( self: Dict ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ ="fr" UpperCAmelCase_ =self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , _lowerCAmelCase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ ="fr" self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) UpperCAmelCase_ ="es" self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
54
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _lowerCamelCase = '\\n\n' _lowerCamelCase = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' _lowerCamelCase = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): """simple docstring""" def __snake_case ( self): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string'''), }) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def __snake_case ( self , a__ , a__ , a__ = 16 , a__ = True , a__=None): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _lowerCamelCase : List[Any] = '''cuda''' else: _lowerCamelCase : Dict = '''cuda''' if torch.cuda.is_available() else '''cpu''' _lowerCamelCase : List[str] = AutoModelForCausalLM.from_pretrained(a__) _lowerCamelCase : Union[str, Any] = model.to(a__) _lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(a__) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _lowerCamelCase : Any = list(tokenizer.special_tokens_map_extended.values()) # check that the model already has at least one special token defined assert ( len(a__) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]}) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _lowerCamelCase : Optional[int] = model.config.max_length - 1 else: _lowerCamelCase : Optional[int] = model.config.max_length _lowerCamelCase : Union[str, Any] = tokenizer( a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , return_tensors='''pt''' , return_attention_mask=a__ , ).to(a__) _lowerCamelCase : Any = encodings['''input_ids'''] _lowerCamelCase : Dict = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1) , 1)), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1) , 2)), "When add_start_token=False, each input text must be at least two tokens long. 
Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _lowerCamelCase : Dict = [] _lowerCamelCase : Union[str, Any] = CrossEntropyLoss(reduction='''none''') for start_index in logging.tqdm(range(0 , len(a__) , a__)): _lowerCamelCase : Any = min(start_index + batch_size , len(a__)) _lowerCamelCase : List[Any] = encoded_texts[start_index:end_index] _lowerCamelCase : Optional[int] = attn_masks[start_index:end_index] if add_start_token: _lowerCamelCase : Tuple = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(a__) _lowerCamelCase : str = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1) _lowerCamelCase : str = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa).to(a__), attn_mask] , dim=1) _lowerCamelCase : Dict = encoded_batch with torch.no_grad(): _lowerCamelCase : List[str] = model(a__ , attention_mask=a__).logits _lowerCamelCase : List[str] = out_logits[..., :-1, :].contiguous() _lowerCamelCase : Dict = labels[..., 1:].contiguous() _lowerCamelCase : List[Any] = attn_mask[..., 1:].contiguous() _lowerCamelCase : str = torch.expa( (loss_fct(shift_logits.transpose(1 , 2) , a__) * shift_attention_mask_batch).sum(1) / shift_attention_mask_batch.sum(1)) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(a__)}
114
0
# Unit tests for the YOLOS vision model (object detection).
#
# NOTE(review): the incoming chunk used machine-mangled identifiers that made
# the module unimportable: both helper and test classes were named `_A` (the
# second shadowed the first, so `YolosModelTester(self)` raised NameError),
# the test class inherited from `__magic_name__, __magic_name__` (duplicate
# base -> TypeError), every class attribute shared one name (each assignment
# overwrote the previous), and `self.x = y` statements had been turned into
# dead local assignments. Names below are restored from the call sites, the
# imported-but-unused mixins, and standard ModelTesterMixin conventions.
import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class YolosModelTester:
    """Builds tiny YOLOS configs and inputs, and runs shape checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is a list of per-example dicts."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        # +1 on num_labels accounts for the "no object" class
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for YOLOS."""

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    """End-to-end checks against the public hustvl/yolos-small checkpoint (slow)."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
353
# Information-Gain-Filtration (IGF) fine-tuning example for GPT-2.
#
# NOTE(review): the incoming chunk was unrunnable — all four functions were
# named `A_` (each def shadowed the previous, so `main`, `generate_n_pairs`,
# `training_secondary_learner` and `finetune` were undefined at their call
# sites), every parameter was named `a` (duplicate-argument SyntaxError), and
# the import names were mangled (`load_gpta`, `GPTaLMHeadModel` do not exist).
# Names and argparse defaults below are reconstructed from the call sites;
# defaults marked "TODO confirm" should be checked against the upstream
# transformers research project before relying on them.
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information-gain) pairs and dump them to `igf_data_file`."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(
        model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file
    )

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the secondary learner (the IGF filter) and return it."""
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner


def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune GPT-2, optionally filtering batches through the secondary learner."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    """Parse CLI flags and run the three IGF stages end to end."""
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    # NOTE(review): `default`/`type`/`required` values below were lost in the
    # mangled source and are reconstructed — TODO confirm against upstream.
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=False,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=False,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=False,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
353
1
# Tokenizer tests for BLOOM's fast tokenizer.
#
# NOTE(review): the incoming chunk was unimportable — the test class inherited
# from the undefined name `_UpperCAmelCase` (the imported `TokenizerTesterMixin`
# was unused, clearly the intended base), every class attribute was named
# `lowerCAmelCase` (each overwrote the last), and test methods were named `__A`
# so unittest discovery never ran them. Names restored from the mixin's
# documented attribute conventions.
import unittest

from datasets import load_dataset

from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Common + BLOOM-specific tokenizer tests (fast tokenizer only)."""

    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        """Round-trip a pair of sentences through encode/decode against known ids."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        """Encode/decode round-trip on one multilingual XNLI sample."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positoonal embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
491
import torch
from torch import nn

from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    """CLIP-based image encoder for the Paint-by-Example pipeline.

    Encodes an example image with a CLIP vision tower, refines the pooled
    features with a small transformer mapper, and projects them to `proj_size`.

    Fixes over the previous revision: the duplicated `__init__` parameter names
    (a SyntaxError), the unresolved base-class/mapper names, and the instance
    attributes that were bound to locals instead of `self`.
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # Learned embedding returned alongside the conditioning for
        # classifier-free guidance (the "uncondition" branch).
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode `pixel_values`; optionally also return the unconditional vector.

        Returns latent states of shape (batch, 1, proj_size) — the pooled CLIP
        output is unsqueezed to a length-1 sequence before the mapper.
        """
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    """Small stack of transformer blocks that refines the pooled CLIP features."""

    def __init__(self, config):
        super().__init__()
        # One block per five hidden layers of the vision tower (rounded up).
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
491
1
"""Checker for the documentation table of content (`_toctree.yml`).

Verifies that the Schedulers and Pipelines sections of the API docs are
de-duplicated and alphabetically sorted; with ``--fix_and_overwrite`` the
file is rewritten in place instead of raising.
"""
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Clean one TOC section: de-duplicate entries, sort by title, "Overview" first.

    Args:
        doc_list: list of dicts with at least a ``"title"`` key and usually a
            ``"local"`` key pointing at the doc file.

    Returns:
        The cleaned list (Overview entry first, rest sorted case-insensitively).

    Raises:
        ValueError: if a duplicated ``local`` has inconsistent titles, or if the
            section contains more than one "Overview" page.
    """
    # Count how many entries point at each local doc so duplicates can be merged.
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    """Check (and optionally fix in place) the Schedulers section of the API TOC."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    """Check (and optionally fix in place) the Pipelines section of the API TOC."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
459
"""SQuAD v1 metric (exact match and F1), wrapping the official scoring script."""
import datasets

from .evaluate import evaluate


_CITATION = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"

_DESCRIPTION = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"

_KWARGS_DESCRIPTION = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    """SQuAD v1 metric.

    NOTE: the overridden hooks must be named `_info` and `_compute` — those are
    the method names `datasets.Metric` dispatches to.
    """

    def _info(self):
        # Declares the metric metadata and the feature schema of inputs.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        """Re-shape inputs into the SQuAD v1 dataset layout and run the official scorer."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # The official scorer expects the nested article/paragraph/qas layout.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
459
1
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
# Token ids suppressed during generation for the English-only checkpoints.
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
# Token ids suppressed during generation for the multilingual checkpoints.
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    """Configuration for a Whisper encoder-decoder speech model.

    Stores the architecture hyper-parameters (layer counts, hidden sizes,
    dropouts), generation-related special token ids, and the SpecAugment
    masking parameters used during fine-tuning.
    """

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for Whisper.

    NOTE: the overrides must keep the names `inputs`, `generate_dummy_inputs`
    and `atol_for_validation` — they override hooks of the Onnx config base
    classes.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        """Build dummy encoder audio features and decoder token ids for export."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # With a cache, the decoder sequence is tied to the encoder output length.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
27
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImgaImgPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the SDXL img2img pipeline.

    The tester-mixin contract requires these distinct class attributes; the
    previous revision assigned them all to one name, so only the last survived.
    """

    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a tiny randomly-initialized set of SDXL components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,  # 6 * 8 + 32
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic tiny inputs for the pipeline call."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5  # map [-0.5, 0.5] noise into the [0, 1] image range
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    # NOTE(review): intentionally a no-op in the original — skipped upstream.
    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImgaImgIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests using a real pretrained checkpoint."""

    def tearDown(self):
        # Must be named `tearDown` so unittest actually frees GPU memory after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
0
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


# Two distinct tiny checkpoints — the previous revision assigned both URLs to
# the same name, clobbering the first, and referenced an undefined `_snake_case`.
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    """Tests for `create_student_by_copying_alternating_layers`.

    Each test needs a distinct name — unittest only discovers one method per
    name, so the previous all-identical method names silently dropped tests.
    """

    @cached_property
    def teacher_config(self):
        # Teacher config loaded once per test-case instance.
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # Only checks that an asymmetric student can be built without error.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        # d=None keeps the full teacher decoder.
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # Requesting neither encoder nor decoder layers is invalid.
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
708
import inspect
import unittest

from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
    from transformers.models.mobilevitva.modeling_mobilevitva import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    """Builds tiny configs/inputs and runs shape checks for MobileViTV2 models."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Final feature dimension scales with the width multiplier, rounded to a multiple of 8.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MobileViTV2 (no attentions, no embeddings)."""

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats test image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
167
0
from __future__ import annotations def a__ ( snake_case__ : int = 4 ): _UpperCAmelCase : Union[str, Any] = abs(snake_case__ ) or 4 return [[1 + x + y * row_size for x in range(snake_case__ )] for y in range(snake_case__ )] def a__ ( snake_case__ : list[list[int]] ): return reverse_row(transpose(snake_case__ ) ) # OR.. transpose(reverse_column(matrix)) def a__ ( snake_case__ : list[list[int]] ): return reverse_row(reverse_column(snake_case__ ) ) # OR.. reverse_column(reverse_row(matrix)) def a__ ( snake_case__ : list[list[int]] ): return reverse_column(transpose(snake_case__ ) ) # OR.. transpose(reverse_row(matrix)) def a__ ( snake_case__ : list[list[int]] ): _UpperCAmelCase : int = [list(snake_case__ ) for x in zip(*snake_case__ )] return matrix def a__ ( snake_case__ : list[list[int]] ): _UpperCAmelCase : int = matrix[::-1] return matrix def a__ ( snake_case__ : list[list[int]] ): _UpperCAmelCase : List[str] = [x[::-1] for x in matrix] return matrix def a__ ( snake_case__ : list[list[int]] ): for i in matrix: print(*snake_case__ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 90 counterclockwise:\n') print_matrix(rotate_aa(matrix)) SCREAMING_SNAKE_CASE__ : Any = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 180:\n') print_matrix(rotate_aaa(matrix)) SCREAMING_SNAKE_CASE__ : str = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 270 counterclockwise:\n') print_matrix(rotate_aaa(matrix))
643
def a__ ( snake_case__ : int , snake_case__ : int ): return 1 if input_a == input_a else 0 def a__ ( ): assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
643
1
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class lowerCAmelCase_ : def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ): return None class lowerCAmelCase_ : def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ): return None class lowerCAmelCase_ ( unittest.TestCase ): __a : str = [ # (model_name, model_kwargs) ("bert-base-cased", {}), ("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def snake_case ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(UpperCamelCase_ ,'tf' ,12 ,**UpperCamelCase_ ) @require_torch @slow def snake_case ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(UpperCamelCase_ ,'pt' ,12 ,**UpperCamelCase_ ) @require_torch @slow def snake_case ( self ): from transformers import BertModel SCREAMING_SNAKE_CASE_ : Optional[Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(UpperCamelCase_ ) ) vocab_file.flush() SCREAMING_SNAKE_CASE_ : Optional[Any] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: SCREAMING_SNAKE_CASE_ : Any = BertModel(BertConfig(vocab_size=len(UpperCamelCase_ ) ) ) model.save_pretrained(UpperCamelCase_ ) self._test_export(UpperCamelCase_ ,'pt' ,12 ,UpperCamelCase_ ) @require_tf @slow def snake_case ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE_ : Dict = self._test_export(UpperCamelCase_ ,'tf' ,12 ,**UpperCamelCase_ ) 
SCREAMING_SNAKE_CASE_ : str = quantize(Path(UpperCamelCase_ ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(UpperCamelCase_ ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def snake_case ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE_ : int = self._test_export(UpperCamelCase_ ,'pt' ,12 ,**UpperCamelCase_ ) SCREAMING_SNAKE_CASE_ : List[Any] = quantize(UpperCamelCase_ ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(UpperCamelCase_ ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ,**snake_case__ ): try: # Compute path with TemporaryDirectory() as tempdir: SCREAMING_SNAKE_CASE_ : int = Path(UpperCamelCase_ ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,**UpperCamelCase_ ) return path except Exception as e: self.fail(UpperCamelCase_ ) @require_torch @require_tokenizers @slow def snake_case ( self ): from transformers import BertModel SCREAMING_SNAKE_CASE_ : List[str] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) SCREAMING_SNAKE_CASE_ : List[str] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(UpperCamelCase_ ,UpperCamelCase_ ,'pt' ) @require_tf @require_tokenizers @slow def snake_case ( self ): from transformers import TFBertModel SCREAMING_SNAKE_CASE_ : Any = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(UpperCamelCase_ ,UpperCamelCase_ ,'tf' ) 
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ): SCREAMING_SNAKE_CASE_ : str = FeatureExtractionPipeline(UpperCamelCase_ ,UpperCamelCase_ ) SCREAMING_SNAKE_CASE_ : str = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] SCREAMING_SNAKE_CASE_ : Dict = infer_shapes(UpperCamelCase_ ,UpperCamelCase_ ) # Assert all variables are present self.assertEqual(len(UpperCamelCase_ ) ,len(UpperCamelCase_ ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,UpperCamelCase_ ) self.assertSequenceEqual(variable_names[3:] ,UpperCamelCase_ ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def snake_case ( self ): SCREAMING_SNAKE_CASE_ : Tuple = ['input_ids', 'attention_mask', 'token_type_ids'] SCREAMING_SNAKE_CASE_ : str = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} SCREAMING_SNAKE_CASE_ : str = ensure_valid_input(FuncContiguousArgs() ,UpperCamelCase_ ,UpperCamelCase_ ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(UpperCamelCase_ ) ,3 ) # Should have exactly the same input names self.assertEqual(set(UpperCamelCase_ ) ,set(UpperCamelCase_ ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(UpperCamelCase_ ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) SCREAMING_SNAKE_CASE_ : str = ensure_valid_input(FuncNonContiguousArgs() ,UpperCamelCase_ ,UpperCamelCase_ ) # 
Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(UpperCamelCase_ ) ,1 ) self.assertEqual(len(UpperCamelCase_ ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def snake_case ( self ): SCREAMING_SNAKE_CASE_ : Optional[Any] = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
704
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) UpperCamelCase__ : str = logging.getLogger(__name__) @dataclass(frozen=lowerCamelCase_ ) class lowerCAmelCase_ : __a : str __a : str __a : Optional[str] = None __a : Optional[str] = None __a : Optional[str] = None @dataclass(frozen=lowerCamelCase_ ) class lowerCAmelCase_ : __a : List[int] __a : Optional[List[int]] = None __a : Optional[List[int]] = None __a : Optional[Union[int, float]] = None __a : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class lowerCAmelCase_ ( lowerCamelCase_ ): __a : List[InputFeatures] def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,): SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]() SCREAMING_SNAKE_CASE_ : List[str] = os.path.join( snake_case__ ,'cached_{}_{}_{}_{}'.format( 'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,) SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1] SCREAMING_SNAKE_CASE_ : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock' with FileLock(snake_case__ ): if os.path.exists(snake_case__ ) and not overwrite_cache: logger.info(F'Loading features from cached file {cached_features_file}' ) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ ) else: logger.info(F'Creating features from dataset file at {data_dir}' ) SCREAMING_SNAKE_CASE_ : List[Any] = ( processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ ) ) logger.info('Training examples: %s' ,len(snake_case__ ) ) SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) logger.info('Saving features into cached file %s' ,snake_case__ ) torch.save(self.features ,snake_case__ ) def __len__( self ): return len(self.features ) def __getitem__( self ,snake_case__ ): return self.features[i] def snake_case ( self ): return self.label_list if is_tf_available(): import tensorflow as tf class lowerCAmelCase_ : __a : List[InputFeatures] def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,): SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]() SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ ) SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to 
features' ): if ex_index % 10000 == 0: logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator( snake_case__ ,( { 'example_id': tf.intaa, 'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa, }, tf.intaa, ) ,( { 'example_id': tf.TensorShape([] ), 'input_ids': tf.TensorShape([None, None] ), 'attention_mask': tf.TensorShape([None, None] ), 'token_type_ids': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) ,) def snake_case ( self ): return self.dataset def __len__( self ): return len(self.features ) def __getitem__( self ,snake_case__ ): return self.features[i] def snake_case ( self ): return self.label_list class lowerCAmelCase_ ( lowerCamelCase_ ): def snake_case ( self ,snake_case__ ): return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' ) def snake_case ( self ,snake_case__ ): return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' ) def snake_case ( self ): return ["contradiction", "entailment", "neutral"] def snake_case ( self ,snake_case__ ,snake_case__ ): SCREAMING_SNAKE_CASE_ : Optional[int] = [] for i, line in enumerate(snake_case__ ): if i == 0: continue SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0]) SCREAMING_SNAKE_CASE_ : Dict = line[5] SCREAMING_SNAKE_CASE_ : Dict = line[6] SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7] SCREAMING_SNAKE_CASE_ : Optional[int] = line[0] examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) ) return examples def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int 
, lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )} SCREAMING_SNAKE_CASE_ : Dict = [] for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ): if ex_index % 1_00_00 == 0: logger.info('Writing example %d' % (ex_index) ) SCREAMING_SNAKE_CASE_ : Any = tokenizer( example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0 SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID ) features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) ) for i, example in enumerate(examples[:5] ): logger.info('*** Example ***' ) logger.info(F'guid: {example}' ) logger.info(F'features: {features[i]}' ) return features UpperCamelCase__ : str = { '''hans''': 3, } UpperCamelCase__ : Dict = { '''hans''': HansProcessor, }
685
0
import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor _lowerCAmelCase : str = logging.get_logger(__name__) class __magic_name__ ( lowerCAmelCase_ ): def __init__( self , *__snake_case , **__snake_case ) -> None: '''simple docstring''' warnings.warn( 'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use SegformerImageProcessor instead.' , __snake_case , ) super().__init__(*__snake_case , **__snake_case )
242
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = { "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json", "YituTech/conv-bert-medium-small": ( "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json" ), "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json", # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class __magic_name__ ( lowerCAmelCase_ ): SCREAMING_SNAKE_CASE = 'convbert' def __init__( self , __snake_case=3_0522 , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=2 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=1 , __snake_case=0 , __snake_case=2 , __snake_case=768 , __snake_case=2 , __snake_case=9 , __snake_case=1 , __snake_case=None , **__snake_case , ) -> Optional[Any]: '''simple docstring''' super().__init__( pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case , ) __a =vocab_size __a =hidden_size __a =num_hidden_layers __a =num_attention_heads __a =intermediate_size __a =hidden_act __a =hidden_dropout_prob __a =attention_probs_dropout_prob __a =max_position_embeddings __a =type_vocab_size __a =initializer_range __a =layer_norm_eps __a =embedding_size __a =head_ratio __a =conv_kernel_size __a =num_groups __a =classifier_dropout class __magic_name__ ( lowerCAmelCase_ ): @property def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": __a ={0: 'batch', 1: 'choice', 2: 'sequence'} else: __a ={0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', 
dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
242
1
"""simple docstring""" import numpy as np def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray: return np.where(vector > 0 , __UpperCAmelCase , (alpha * (np.exp(__UpperCAmelCase ) - 1)) ) if __name__ == "__main__": import doctest doctest.testmod()
507
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = """▁""" _A = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""} _A = { """sentencepiece_model_file""": """sentencepiece.bpe.model""", """vocab_file""": """vocab.txt""", } _A = { """vocab_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", }, """sentencepiece_model_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", }, } _A = { """ernie-m-base""": 5_1_4, """ernie-m-large""": 5_1_4, } _A = { """ernie-m-base""": {"""do_lower_case""": False}, """ernie-m-large""": {"""do_lower_case""": False}, } class _lowerCamelCase ( a_ ): _lowerCamelCase :List[str] = ["input_ids"] _lowerCamelCase :Any = VOCAB_FILES_NAMES _lowerCamelCase :List[Any] = PRETRAINED_INIT_CONFIGURATION _lowerCamelCase :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase :List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase :List[Any] = RESOURCE_FILES_NAMES def __init__( self : Tuple , UpperCamelCase : int , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Tuple=False , UpperCamelCase : int="utf8" , UpperCamelCase : List[Any]="[UNK]" , UpperCamelCase : int="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : str="[CLS]" , UpperCamelCase : Dict="[MASK]" , UpperCamelCase : Optional[Dict[str, Any]] = None , **UpperCamelCase : Dict , ) -> None: """simple docstring""" # Mask token behave like a normal word, i.e. 
include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. lowerCAmelCase__ : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , vocab_file=UpperCamelCase , encoding=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase , ) lowerCAmelCase__ : Any = do_lower_case lowerCAmelCase__ : Optional[Any] = sentencepiece_model_ckpt lowerCAmelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowerCAmelCase__ : Optional[int] = self.load_vocab(filepath=UpperCamelCase ) else: lowerCAmelCase__ : Tuple = {self.sp_model.id_to_piece(UpperCamelCase ): id for id in range(self.sp_model.get_piece_size() )} lowerCAmelCase__ : List[str] = {v: k for k, v in self.vocab.items()} def _lowerCAmelCase ( self : Tuple , UpperCamelCase : int ) -> Any: """simple docstring""" if text is None: return None lowerCAmelCase__ : Optional[Any] = self.tokenize(UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = """""", [] for i, ch in enumerate(UpperCamelCase ): if ch in self.SP_CHAR_MAPPING: lowerCAmelCase__ : Union[str, Any] = self.SP_CHAR_MAPPING.get(UpperCamelCase ) else: lowerCAmelCase__ : List[Any] = unicodedata.normalize("""NFKC""" , UpperCamelCase ) if self.is_whitespace(UpperCamelCase ): continue normalized_text += ch char_mapping.extend([i] * len(UpperCamelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = normalized_text, [], 0 if self.do_lower_case: lowerCAmelCase__ : List[Any] = text.lower() for token in split_tokens: if token[:1] == "▁": lowerCAmelCase__ : List[str] = token[1:] lowerCAmelCase__ : Dict = 
text[offset:].index(UpperCamelCase ) + offset lowerCAmelCase__ : List[Any] = start + len(UpperCamelCase ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) lowerCAmelCase__ : Optional[int] = end return token_mapping @property def _lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" return len(self.vocab ) def _lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : Dict ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : List[Any] = self.__dict__.copy() lowerCAmelCase__ : Any = None return state def __setstate__( self : List[str] , UpperCamelCase : Optional[Any] ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : Optional[int] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase__ : Union[str, Any] = {} lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def _lowerCAmelCase ( self : Tuple , UpperCamelCase : str ) -> str: """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase , UpperCamelCase ) for c in text) ) def _lowerCAmelCase ( self : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=64 , UpperCamelCase : List[Any]=0.1 ) -> Any: """simple docstring""" if self.sp_model_kwargs.get("""enable_sampling""" ) is True: lowerCAmelCase__ : Union[str, Any] = True if self.sp_model_kwargs.get("""alpha""" ) is not None: lowerCAmelCase__ : Union[str, Any] = self.sp_model_kwargs.get("""alpha""" ) if self.sp_model_kwargs.get("""nbest_size""" ) is not None: lowerCAmelCase__ : Union[str, Any] = self.sp_model_kwargs.get("""nbest_size""" ) if not enable_sampling: lowerCAmelCase__ : Union[str, Any] = self.sp_model.EncodeAsPieces(UpperCamelCase ) else: lowerCAmelCase__ : List[str] = 
self.sp_model.SampleEncodeAsPieces(UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[str] = [] for pi, piece in enumerate(UpperCamelCase ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(UpperCamelCase ) and pi != 0: new_pieces.append(UpperCamelCase ) continue else: continue lowerCAmelCase__ : List[Any] = 0 for i, chunk in enumerate(UpperCamelCase ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(UpperCamelCase ) or self.is_punct(UpperCamelCase ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase__ : Dict = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase__ : Any = i if len(UpperCamelCase ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : str = """""".join(UpperCamelCase ).replace(UpperCamelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : List[str] = self.convert_ids_to_tokens(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = """""".join(UpperCamelCase ).replace(UpperCamelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self : Tuple , UpperCamelCase : int ) -> Union[str, Any]: """simple docstring""" return self.vocab.get(UpperCamelCase , self.vocab.get(self.unk_token ) ) def _lowerCAmelCase ( self : str , UpperCamelCase : Any ) -> Tuple: """simple docstring""" return self.reverse_vocab.get(UpperCamelCase , self.unk_token ) 
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    """Add special tokens: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] [SEP] B [SEP]``."""
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    _cls = [self.cls_token_id]
    _sep = [self.sep_token_id]
    return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep


def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
    """Extend an offset mapping with (0, 0) entries at the special-token slots."""
    if offset_mapping_1 is None:
        return [(0, 0)] + offset_mapping_0 + [(0, 0)]
    return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]


def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
    """Return a mask with 1 at special-token positions and 0 elsewhere.

    Raises:
        ValueError: if a second sequence is supplied together with
            ``already_has_special_tokens=True``.
    """
    if already_has_special_tokens:
        if token_ids_1 is not None:
            raise ValueError(
                "You should not supply a second sequence if the provided sequence of "
                "ids is already formatted with special tokens for the model."
            )
        return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

    if token_ids_1 is not None:
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1]


def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
    """Build segment ids for one sequence or a sequence pair.

    Called when `add_special_tokens` is True, so the layout aligns with
    `build_inputs_with_special_tokens`.
    """
    if token_ids_1 is None:
        # [CLS] X [SEP]
        return (len(token_ids_0) + 2) * [0]
    # [CLS] A [SEP] [SEP] B [SEP]
    return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)


def is_ch_char(self, char: str) -> bool:
    """True if ``char`` is a CJK unified ideograph (U+4E00..U+9FFF)."""
    if "\u4e00" <= char <= "\u9fff":
        return True
    return False


def is_alpha(self, char: str) -> bool:
    """True if ``char`` is an ASCII letter."""
    if ("a" <= char <= "z") or ("A" <= char <= "Z"):
        return True
    return False


def is_punct(self, char: str) -> bool:
    """True if ``char`` is one of the recognized ASCII/CJK punctuation marks."""
    if char in ",;:.?!~,;:。?!《》【】":
        return True
    return False


def is_whitespace(self, char: str) -> bool:
    """True for ASCII whitespace or any Unicode space separator (category Zs)."""
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    # Multi-character strings (e.g. a composed sequence) are never whitespace.
    if len(char) == 1:
        cat = unicodedata.category(char)
        if cat == "Zs":
            return True
    return False


def load_vocab(self, filepath):
    """Read a vocab file (one token per line) into a token -> index dict."""
    token_to_idx = {}
    with io.open(filepath, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            token = line.rstrip("\n")
            token_to_idx[token] = int(index)
    return token_to_idx


def save_vocabulary(self, save_directory, filename_prefix=None):
    """Write the vocabulary (sorted by id) and the sentencepiece model to disk.

    Args:
        save_directory: target directory (or, if not a directory, a file path
            the vocab is written to directly).
        filename_prefix: optional prefix prepended to the vocab filename.

    Returns:
        tuple[str]: the path of the written vocab file.
    """
    index = 0
    if os.path.isdir(save_directory):
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
    else:
        vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
    with open(vocab_file, "w", encoding="utf-8") as writer:
        for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
            # Ids are expected to be consecutive; warn (once per gap) if not.
            if index != token_index:
                logger.warning(
                    f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                    " Please check that the vocabulary is not corrupted!"
                )
                index = token_index
            writer.write(token + "\n")
            index += 1
    # Serialize the sentencepiece model alongside the plain-text vocab.
    tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
    with open(tokenizer_model_file, "wb") as fi:
        content_spiece_model = self.sp_model.serialized_model_proto()
        fi.write(content_spiece_model)
    return (vocab_file,)
507
1