code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' def a_ ( __snake_case : str , __snake_case : Dict ) -> Dict: """simple docstring""" return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def a_ ( __snake_case : Tuple , __snake_case : Union[str, Any]=0 ) -> List[str]: """simple docstring""" return sorted(__snake_case , key=lambda __snake_case : x[column] ) def a_ ( __snake_case : Any , __snake_case : Dict , __snake_case : Tuple=float('''inf''' ) ) -> Union[str, Any]: """simple docstring""" for i in range(points_counts - 1 ): for j in range(i + 1 , __snake_case ): lowerCamelCase_ =euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: lowerCamelCase_ =current_dis return min_dis def a_ ( __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Optional[int]=float('''inf''' ) ) -> str: """simple docstring""" for i in range(min(6 , points_counts - 1 ) , __snake_case ): for j in range(max(0 , i - 6 ) , __snake_case ): lowerCamelCase_ =euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: lowerCamelCase_ =current_dis return min_dis def a_ ( __snake_case : Any , __snake_case : Optional[int] , __snake_case : Dict ) -> Optional[Any]: """simple docstring""" # base case if points_counts <= 3: return dis_between_closest_pair(__snake_case , __snake_case ) # recursion lowerCamelCase_ =points_counts // 2 lowerCamelCase_ =closest_pair_of_points_sqr( __snake_case , points_sorted_on_y[:mid] , __snake_case ) lowerCamelCase_ =closest_pair_of_points_sqr( __snake_case , points_sorted_on_y[mid:] , points_counts - mid ) lowerCamelCase_ =min(__snake_case , __snake_case ) lowerCamelCase_ =[] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(__snake_case ) lowerCamelCase_ =dis_between_closest_in_strip( __snake_case , len(__snake_case ) , __snake_case ) return min(__snake_case , __snake_case ) def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> Union[str, Any]: """simple 
docstring""" lowerCamelCase_ =column_based_sort(__snake_case , column=0 ) lowerCamelCase_ =column_based_sort(__snake_case , column=1 ) return ( closest_pair_of_points_sqr( __snake_case , __snake_case , __snake_case ) ) ** 0.5 if __name__ == "__main__": a_ : Optional[int] = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)] print("""Distance:""", closest_pair_of_points(points, len(points)))
75
'''simple docstring''' from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline a_ : Any = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, **lowerCAmelCase ): """simple docstring""" super().__init__(**lowerCAmelCase ) if self.framework != "pt": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) # No specific FOR_XXX available yet def __call__( self, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return super().__call__(lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ={} if "candidate_labels" in kwargs: lowerCamelCase_ =kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: lowerCamelCase_ =kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase="This is a sound of {}." 
): """simple docstring""" if isinstance(lowerCAmelCase, lowerCAmelCase ): if audio.startswith('''http://''' ) or audio.startswith('''https://''' ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png lowerCamelCase_ =requests.get(lowerCAmelCase ).content else: with open(lowerCAmelCase, '''rb''' ) as f: lowerCamelCase_ =f.read() if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =ffmpeg_read(lowerCAmelCase, self.feature_extractor.sampling_rate ) if not isinstance(lowerCAmelCase, np.ndarray ): raise ValueError('''We expect a numpy ndarray as input''' ) if len(audio.shape ) != 1: raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' ) lowerCamelCase_ =self.feature_extractor( [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='''pt''' ) lowerCamelCase_ =candidate_labels lowerCamelCase_ =[hypothesis_template.format(lowerCAmelCase ) for x in candidate_labels] lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework, padding=lowerCAmelCase ) lowerCamelCase_ =[text_inputs] return inputs def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =model_inputs.pop('''candidate_labels''' ) lowerCamelCase_ =model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0], lowerCAmelCase ): lowerCamelCase_ =text_inputs[0] else: # Batching case. 
lowerCamelCase_ =text_inputs[0][0] lowerCamelCase_ =self.model(**lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ ={ '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_audio, } return model_outputs def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =model_outputs.pop('''candidate_labels''' ) lowerCamelCase_ =model_outputs['''logits'''][0] if self.framework == "pt": lowerCamelCase_ =logits.softmax(dim=0 ) lowerCamelCase_ =probs.tolist() else: raise ValueError('''`tf` framework not supported.''' ) lowerCamelCase_ =[ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(lowerCAmelCase, lowerCAmelCase ), key=lambda lowerCAmelCase : -x[0] ) ] return result
75
1
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class __UpperCamelCase ( lowerCamelCase__ ): def __get__( self, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" if obj is None: return self if self.fget is None: raise AttributeError('''unreadable attribute''' ) lowerCamelCase_ ='''__cached_''' + self.fget.__name__ lowerCamelCase_ =getattr(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) if cached is None: lowerCamelCase_ =self.fget(lowerCAmelCase ) setattr(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) return cached def a_ ( __snake_case : str ) -> List[str]: """simple docstring""" lowerCamelCase_ =val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F'''invalid truth value {val!r}''' ) def a_ ( __snake_case : Any ) -> List[str]: """simple docstring""" if is_torch_fx_proxy(__snake_case ): return True if is_torch_available(): import torch if isinstance(__snake_case , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(__snake_case , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(__snake_case , (jnp.ndarray, Tracer) ): return True return isinstance(__snake_case , np.ndarray ) def a_ ( __snake_case : Dict ) -> List[str]: """simple docstring""" return isinstance(__snake_case , np.ndarray ) def a_ ( __snake_case : Union[str, Any] ) -> Tuple: """simple docstring""" return _is_numpy(__snake_case ) def a_ ( __snake_case : List[str] ) -> Optional[Any]: 
"""simple docstring""" import torch return isinstance(__snake_case , torch.Tensor ) def a_ ( __snake_case : Tuple ) -> List[Any]: """simple docstring""" return False if not is_torch_available() else _is_torch(__snake_case ) def a_ ( __snake_case : Any ) -> int: """simple docstring""" import torch return isinstance(__snake_case , torch.device ) def a_ ( __snake_case : List[Any] ) -> int: """simple docstring""" return False if not is_torch_available() else _is_torch_device(__snake_case ) def a_ ( __snake_case : Dict ) -> List[str]: """simple docstring""" import torch if isinstance(__snake_case , __snake_case ): if hasattr(__snake_case , __snake_case ): lowerCamelCase_ =getattr(__snake_case , __snake_case ) else: return False return isinstance(__snake_case , torch.dtype ) def a_ ( __snake_case : str ) -> Any: """simple docstring""" return False if not is_torch_available() else _is_torch_dtype(__snake_case ) def a_ ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" import tensorflow as tf return isinstance(__snake_case , tf.Tensor ) def a_ ( __snake_case : str ) -> Optional[int]: """simple docstring""" return False if not is_tf_available() else _is_tensorflow(__snake_case ) def a_ ( __snake_case : str ) -> Optional[Any]: """simple docstring""" import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(__snake_case , '''is_symbolic_tensor''' ): return tf.is_symbolic_tensor(__snake_case ) return type(__snake_case ) == tf.Tensor def a_ ( __snake_case : Dict ) -> Union[str, Any]: """simple docstring""" return False if not is_tf_available() else _is_tf_symbolic_tensor(__snake_case ) def a_ ( __snake_case : Optional[int] ) -> List[Any]: """simple docstring""" import jax.numpy as jnp # noqa: F811 return isinstance(__snake_case , jnp.ndarray ) def a_ ( __snake_case : Optional[Any] ) -> Tuple: """simple docstring""" return False if not is_flax_available() else _is_jax(__snake_case ) def a_ ( __snake_case : 
Any ) -> Union[str, Any]: """simple docstring""" if isinstance(__snake_case , (dict, UserDict) ): return {k: to_py_obj(__snake_case ) for k, v in obj.items()} elif isinstance(__snake_case , (list, tuple) ): return [to_py_obj(__snake_case ) for o in obj] elif is_tf_tensor(__snake_case ): return obj.numpy().tolist() elif is_torch_tensor(__snake_case ): return obj.detach().cpu().tolist() elif is_jax_tensor(__snake_case ): return np.asarray(__snake_case ).tolist() elif isinstance(__snake_case , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a_ ( __snake_case : Optional[Any] ) -> int: """simple docstring""" if isinstance(__snake_case , (dict, UserDict) ): return {k: to_numpy(__snake_case ) for k, v in obj.items()} elif isinstance(__snake_case , (list, tuple) ): return np.array(__snake_case ) elif is_tf_tensor(__snake_case ): return obj.numpy() elif is_torch_tensor(__snake_case ): return obj.detach().cpu().numpy() elif is_jax_tensor(__snake_case ): return np.asarray(__snake_case ) else: return obj class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =fields(self ) # Safety and consistency checks if not len(lowerCAmelCase ): raise ValueError(f'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' ) lowerCamelCase_ =getattr(self, class_fields[0].name ) lowerCamelCase_ =all(getattr(self, field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(lowerCAmelCase ): if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =first_field.items() lowerCamelCase_ =True else: try: lowerCamelCase_ =iter(lowerCAmelCase ) lowerCamelCase_ =True except TypeError: lowerCamelCase_ =False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set 
the associated fields if first_field_iterator: for idx, element in enumerate(lowerCAmelCase ): if ( not isinstance(lowerCAmelCase, (list, tuple) ) or not len(lowerCAmelCase ) == 2 or not isinstance(element[0], lowerCAmelCase ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute lowerCamelCase_ =first_field else: # If we have a mixed iterator, raise an error raise ValueError( f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' ) break setattr(self, element[0], element[1] ) if element[1] is not None: lowerCamelCase_ =element[1] elif first_field is not None: lowerCamelCase_ =first_field else: for field in class_fields: lowerCamelCase_ =getattr(self, field.name ) if v is not None: lowerCamelCase_ =v def __delitem__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self, lowerCAmelCase ): """simple docstring""" if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(lowerCAmelCase, lowerCAmelCase ) super().__setattr__(lowerCAmelCase, lowerCAmelCase ) def __setitem__( self, 
lowerCAmelCase, lowerCAmelCase ): """simple docstring""" super().__setitem__(lowerCAmelCase, lowerCAmelCase ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" return tuple(self[k] for k in self.keys() ) class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): @classmethod def lowercase__ ( cls, lowerCAmelCase ): """simple docstring""" raise ValueError( f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : str ='longest' lowercase : Tuple ='max_length' lowercase : Dict ='do_not_pad' class __UpperCamelCase ( lowerCamelCase__ ): lowercase : str ='pt' lowercase : Any ='tf' lowercase : str ='np' lowercase : List[Any] ='jax' class __UpperCamelCase : def __init__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =context_managers lowerCamelCase_ =ExitStack() def __enter__( self ): """simple docstring""" for context_manager in self.context_managers: self.stack.enter_context(lowerCAmelCase ) def __exit__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" self.stack.__exit__(*lowerCAmelCase, **lowerCAmelCase ) def a_ ( __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =infer_framework(__snake_case ) if framework == "tf": lowerCamelCase_ =inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": lowerCamelCase_ =inspect.signature(model_class.forward ) # PyTorch models else: lowerCamelCase_ =inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def a_ ( __snake_case : Any ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =model_class.__name__ lowerCamelCase_ =infer_framework(__snake_case ) if framework == "tf": lowerCamelCase_ 
=inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": lowerCamelCase_ =inspect.signature(model_class.forward ) # PyTorch models else: lowerCamelCase_ =inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def a_ ( __snake_case : MutableMapping , __snake_case : str = "" , __snake_case : str = "." ) -> str: """simple docstring""" def _flatten_dict(__snake_case : List[str] , __snake_case : Tuple="" , __snake_case : str="." ): for k, v in d.items(): lowerCamelCase_ =str(__snake_case ) + delimiter + str(__snake_case ) if parent_key else k if v and isinstance(__snake_case , __snake_case ): yield from flatten_dict(__snake_case , __snake_case , delimiter=__snake_case ).items() else: yield key, v return dict(_flatten_dict(__snake_case , __snake_case , __snake_case ) ) @contextmanager def a_ ( __snake_case : Tuple , __snake_case : bool = False ) -> Tuple: """simple docstring""" if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def a_ ( __snake_case : Dict , __snake_case : List[str]=None ) -> List[str]: """simple docstring""" if is_numpy_array(__snake_case ): return np.transpose(__snake_case , axes=__snake_case ) elif is_torch_tensor(__snake_case ): return array.T if axes is None else array.permute(*__snake_case ) elif is_tf_tensor(__snake_case ): import tensorflow as tf return tf.transpose(__snake_case , perm=__snake_case ) elif is_jax_tensor(__snake_case ): return jnp.transpose(__snake_case , axes=__snake_case ) else: raise ValueError(F'''Type not supported for transpose: {type(__snake_case )}.''' ) def a_ ( __snake_case : List[Any] , __snake_case : Tuple ) -> Dict: """simple docstring""" if is_numpy_array(__snake_case ): return np.reshape(__snake_case , __snake_case ) elif 
is_torch_tensor(__snake_case ): return array.reshape(*__snake_case ) elif is_tf_tensor(__snake_case ): import tensorflow as tf return tf.reshape(__snake_case , __snake_case ) elif is_jax_tensor(__snake_case ): return jnp.reshape(__snake_case , __snake_case ) else: raise ValueError(F'''Type not supported for reshape: {type(__snake_case )}.''' ) def a_ ( __snake_case : Optional[int] , __snake_case : Optional[int]=None ) -> Optional[int]: """simple docstring""" if is_numpy_array(__snake_case ): return np.squeeze(__snake_case , axis=__snake_case ) elif is_torch_tensor(__snake_case ): return array.squeeze() if axis is None else array.squeeze(dim=__snake_case ) elif is_tf_tensor(__snake_case ): import tensorflow as tf return tf.squeeze(__snake_case , axis=__snake_case ) elif is_jax_tensor(__snake_case ): return jnp.squeeze(__snake_case , axis=__snake_case ) else: raise ValueError(F'''Type not supported for squeeze: {type(__snake_case )}.''' ) def a_ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if is_numpy_array(__snake_case ): return np.expand_dims(__snake_case , __snake_case ) elif is_torch_tensor(__snake_case ): return array.unsqueeze(dim=__snake_case ) elif is_tf_tensor(__snake_case ): import tensorflow as tf return tf.expand_dims(__snake_case , axis=__snake_case ) elif is_jax_tensor(__snake_case ): return jnp.expand_dims(__snake_case , axis=__snake_case ) else: raise ValueError(F'''Type not supported for expand_dims: {type(__snake_case )}.''' ) def a_ ( __snake_case : Dict ) -> List[str]: """simple docstring""" if is_numpy_array(__snake_case ): return np.size(__snake_case ) elif is_torch_tensor(__snake_case ): return array.numel() elif is_tf_tensor(__snake_case ): import tensorflow as tf return tf.size(__snake_case ) elif is_jax_tensor(__snake_case ): return array.size else: raise ValueError(F'''Type not supported for expand_dims: {type(__snake_case )}.''' ) def a_ ( __snake_case : Tuple , __snake_case : int ) -> 
Optional[int]: """simple docstring""" for key, value in auto_map.items(): if isinstance(__snake_case , (tuple, list) ): lowerCamelCase_ =[F'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value] elif value is not None and "--" not in value: lowerCamelCase_ =F'''{repo_id}--{value}''' return auto_map def a_ ( __snake_case : Optional[Any] ) -> Tuple: """simple docstring""" for base_class in inspect.getmro(__snake_case ): lowerCamelCase_ =base_class.__module__ lowerCamelCase_ =base_class.__name__ if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('''torch''' ) or name == "PreTrainedModel": return "pt" elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F'''Could not infer framework from class {model_class}.''' )
75
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : str = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp a_ : Optional[Any] = 5 a_ : str = 10 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : int =SpeechaTextTokenizer lowercase : int =False lowercase : List[str] =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =sp.SentencePieceProcessor() spm_model.Load(lowerCAmelCase ) lowerCamelCase_ =['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCAmelCase ) )] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ =Path(self.tmpdirname ) save_json(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''<pad>''' lowerCamelCase_ =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ), lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ), lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =list(self.get_tokenizer().get_vocab().keys() ) 
self.assertEqual(vocab_keys[0], '''<s>''' ) self.assertEqual(vocab_keys[1], '''<pad>''' ) self.assertEqual(vocab_keys[-1], '''j''' ) self.assertEqual(len(lowerCAmelCase ), 1_001 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size, 1_001 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCamelCase_ =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [289, 50, 14, 174, 386], ) lowerCamelCase_ =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], ) lowerCamelCase_ =tokenizer.convert_tokens_to_ids(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCamelCase_ =tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 
142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase, model_name='''facebook/s2t-small-mustc-en-de-st''', revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''', ) @require_sentencepiece class __UpperCamelCase ( 
unittest.TestCase ): lowercase : Tuple ='valhalla/s2t_mustc_multilinguial_medium' lowercase : Dict ='C\'est trop cool' lowercase : str ='Esto es genial' @classmethod def lowercase__ ( cls ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 11 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size, 10_000 ) def lowercase__ ( self ): """simple docstring""" self.assertIn(lowerCAmelCase, self.tokenizer.all_special_ids ) lowerCamelCase_ =[ES_CODE, 4, 1_601, 47, 7_647, 2] lowerCamelCase_ =self.tokenizer.decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' lowerCamelCase_ =self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0], lowerCAmelCase ) self.assertEqual(encoded[-1], self.tokenizer.eos_token_id ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE] ) lowerCamelCase_ ='''es''' self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE] )
75
1
'''simple docstring''' from math import loga def a_ ( __snake_case : int ) -> int: """simple docstring""" if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(__snake_case , __snake_case ): raise TypeError('''Input value must be a \'int\' type''' ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
75
'''Convert InstructBLIP checkpoints from the Salesforce LAVIS library to the
Hugging Face Transformers format.

NOTE(review): identifiers in this file have been mechanically mangled — every
local assignment target is ``lowerCamelCase_`` and most reads are
``__snake_case`` (several defs even repeat parameter names) — so the module
cannot run as-is.  The docstrings below describe the intended behaviour of
each step; verify names against the upstream conversion script before running.
'''
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    TaConfig,
    TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def a_() -> Dict:
    """Download and return the demo image used to sanity-check the conversion."""
    lowerCamelCase_ = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
    lowerCamelCase_ = Image.open(requests.get(__snake_case, stream=__snake_case).raw).convert('''RGB''')
    return image


def a_(__snake_case: Tuple) -> Dict:
    """Build the (source, destination) key pairs mapping LAVIS vision/Q-Former
    state-dict names onto the HF InstructBLIP naming scheme."""
    lowerCamelCase_ = []
    # fmt: off
    # vision encoder
    rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding'''))
    rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding'''))
    rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight'''))
    rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias'''))
    rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight'''))
    rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias'''))
    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',))
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight'''))
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias'''))
    # QFormer
    rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight'''))
    rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias'''))
    # fmt: on
    return rename_keys


def a_(__snake_case: Optional[Any], __snake_case: List[str], __snake_case: List[Any]) -> Dict:
    """Move one entry of the state dict from its old key to its new key."""
    lowerCamelCase_ = dct.pop(__snake_case)
    lowerCamelCase_ = val


def a_(__snake_case: str, __snake_case: Optional[Any]) -> Any:
    """Recombine the separately-stored q and v attention biases into one qkv bias;
    the k part is zero in the original model."""
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        lowerCamelCase_ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''')
        lowerCamelCase_ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''')
        # next, set bias in the state dict
        lowerCamelCase_ = torch.cat((q_bias, torch.zeros_like(__snake_case, requires_grad=__snake_case), v_bias))
        lowerCamelCase_ = qkv_bias


def a_(__snake_case: List[str]) -> Any:
    """Assemble the InstructBlipConfig (vision + text + Q-Former) for a model name;
    returns (config, image_size)."""
    # COCO-finetuned checkpoints use a larger 364x364 input resolution
    lowerCamelCase_ = 364 if '''coco''' in model_name else 224
    lowerCamelCase_ = InstructBlipVisionConfig(image_size=__snake_case).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        lowerCamelCase_ = TaConfig.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        lowerCamelCase_ = TaConfig.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        lowerCamelCase_ = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''', vocab_size=3_2001).to_dict()
    elif "vicuna-13b" in model_name:
        lowerCamelCase_ = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''', vocab_size=3_2001).to_dict()
    else:
        raise ValueError('''Model name not supported''')
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    lowerCamelCase_ = InstructBlipQFormerConfig(vocab_size=3_0523).to_dict()
    lowerCamelCase_ = InstructBlipConfig(vision_config=__snake_case, text_config=__snake_case, qformer_config=__snake_case)
    return config, image_size


@torch.no_grad()
def a_(__snake_case: Optional[int], __snake_case: str = None, __snake_case: List[Any] = False) -> List[str]:
    """Load the LAVIS checkpoint, rename its weights into an HF model, verify that
    logits and generations match the original, then optionally save / push to Hub."""
    lowerCamelCase_ = AutoTokenizer.from_pretrained('''bert-base-uncased''', truncation_side='''left''')
    qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''})
    if "t5" in model_name:
        lowerCamelCase_ = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''', truncation_side='''left''')
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        lowerCamelCase_ = LlamaTokenizerFast.from_pretrained(
            '''huggyllama/llama-7b''', truncation_side='''left''', bos_token='''</s>''', unk_token='''</s>''')
        tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''})
    lowerCamelCase_, lowerCamelCase_ = get_blipa_config(__snake_case)
    lowerCamelCase_ = InstructBlipForConditionalGeneration(__snake_case).eval()
    lowerCamelCase_ = {
        '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
        '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
        '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
        '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
    }
    lowerCamelCase_, lowerCamelCase_ = model_name_to_original[model_name]
    # load original model
    print('''Loading original model...''')
    # original and HF model are kept on separate devices to fit both in memory
    lowerCamelCase_ = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
    lowerCamelCase_ = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
    lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = load_model_and_preprocess(
        name=__snake_case, model_type=__snake_case, is_eval=__snake_case, device=__snake_case)
    original_model.eval()
    print('''Done!''')
    # update state dict keys
    lowerCamelCase_ = original_model.state_dict()
    lowerCamelCase_ = create_rename_keys(__snake_case)
    for src, dest in rename_keys:
        rename_key(__snake_case, __snake_case, __snake_case)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        lowerCamelCase_ = state_dict.pop(__snake_case)
        if key.startswith('''Qformer.bert'''):
            lowerCamelCase_ = key.replace('''Qformer.bert''', '''qformer''')
        if "attention.self" in key:
            lowerCamelCase_ = key.replace('''self''', '''attention''')
        if "llm_proj" in key:
            lowerCamelCase_ = key.replace('''llm_proj''', '''language_projection''')
        if "t5_proj" in key:
            lowerCamelCase_ = key.replace('''t5_proj''', '''language_projection''')
        if key.startswith('''llm_model'''):
            lowerCamelCase_ = key.replace('''llm_model''', '''language_model''')
        if key.startswith('''t5'''):
            lowerCamelCase_ = key.replace('''t5''', '''language''')
        lowerCamelCase_ = val
    # read in qv biases
    read_in_q_v_bias(__snake_case, __snake_case)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(__snake_case, strict=__snake_case)
    lowerCamelCase_ = load_demo_image()
    lowerCamelCase_ = '''What is unusual about this image?'''
    # create processor
    lowerCamelCase_ = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size}, image_mean=__snake_case, image_std=__snake_case)
    lowerCamelCase_ = InstructBlipProcessor(
        image_processor=__snake_case,
        tokenizer=__snake_case,
        qformer_tokenizer=__snake_case,
    )
    lowerCamelCase_ = processor(images=__snake_case, text=__snake_case, return_tensors='''pt''').to(__snake_case)
    # make sure processor creates exact same pixel values
    lowerCamelCase_ = vis_processors['''eval'''](__snake_case).unsqueeze(0).to(__snake_case)
    lowerCamelCase_ = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), __snake_case)
    original_model.to(__snake_case)
    hf_model.to(__snake_case)
    with torch.no_grad():
        if "vicuna" in model_name:
            lowerCamelCase_ = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]}).logits
            lowerCamelCase_ = hf_model(**__snake_case).logits
        else:
            # T5 variants need a (dummy) target text to produce logits
            lowerCamelCase_ = original_model(
                {'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']}).logits
            lowerCamelCase_ = tokenizer('''\n''', return_tensors='''pt''').input_ids.to(__snake_case)
            # -100 = ignore index for the loss on padding positions
            lowerCamelCase_ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            lowerCamelCase_ = hf_model(**__snake_case, labels=__snake_case).logits
    print('''First values of original logits:''', original_logits[0, :3, :3])
    print('''First values of HF logits:''', logits[0, :3, :3])
    # assert values
    assert original_logits.shape == logits.shape
    lowerCamelCase_ = 1e-4 if '''vicuna''' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), __snake_case, atol=__snake_case)
    print('''Looks ok!''')
    print('''Generating with original model...''')
    lowerCamelCase_ = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt}, num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print('''Generating with HF model...''')
    lowerCamelCase_ = hf_model.generate(
        **__snake_case,
        do_sample=__snake_case,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        lowerCamelCase_ = 2
    print('''Original generation:''', __snake_case)
    lowerCamelCase_ = processor.batch_decode(__snake_case, skip_special_tokens=__snake_case)
    lowerCamelCase_ = [text.strip() for text in output_text]
    print('''HF generation:''', __snake_case)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(__snake_case)
        hf_model.save_pretrained(__snake_case)
    if push_to_hub:
        processor.push_to_hub(F'''Salesforce/{model_name}''')
        hf_model.push_to_hub(F'''Salesforce/{model_name}''')


if __name__ == "__main__":
    a_ : Any = argparse.ArgumentParser()
    a_ : Any = [
        """instructblip-vicuna-7b""",
        """instructblip-vicuna-13b""",
        """instructblip-flan-t5-xl""",
        """instructblip-flan-t5-xxl""",
    ]
    parser.add_argument(
        """--model_name""",
        default="""instructblip-flan-t5-xl""",
        choices=choices,
        type=str,
        help="""Path to hf config.json of model to convert""",
    )
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub after converting""",
    )
    a_ : str = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
75
1
'''XOR cipher: symmetric encryption/decryption by XOR-ing each character with a key.

NOTE(review): the first and second methods (list-based) are byte-for-byte
duplicates of each other, as are the third and fourth (string-based) — XOR is
its own inverse, so encrypt and decrypt are intentionally identical.
Identifiers here are mangled (duplicate parameter names, reads of undefined
names like `key`/`content`), so the class will not run as-is.
'''
from __future__ import annotations


class __UpperCamelCase:
    def __init__(self, lowerCAmelCase=0):
        """Store the default key used when a per-call key is not supplied."""
        # NOTE(review): assignment reads `key`, not the parameter — mangled name.
        lowerCamelCase_ = key

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase):
        """Encrypt: return a list with every character of the content XOR-ed with the key."""
        assert isinstance(lowerCAmelCase, lowerCAmelCase) and isinstance(lowerCAmelCase, lowerCAmelCase)
        # fall back to the instance key, then to 1, so XOR never degenerates to identity
        lowerCamelCase_ = key or self.__key or 1
        # make sure key is an appropriate size
        # NOTE(review): `% 255` (not 256) looks suspicious — confirm intended range
        key %= 255
        return [chr(ord(lowerCAmelCase) ^ key) for ch in content]

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase):
        """Decrypt: identical to encrypt, since XOR is self-inverse."""
        assert isinstance(lowerCAmelCase, lowerCAmelCase) and isinstance(lowerCAmelCase, lowerCAmelCase)
        lowerCamelCase_ = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(lowerCAmelCase) ^ key) for ch in content]

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=0):
        """Encrypt a string and return the result as a single string."""
        assert isinstance(lowerCAmelCase, lowerCAmelCase) and isinstance(lowerCAmelCase, lowerCAmelCase)
        lowerCamelCase_ = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        lowerCamelCase_ = ''''''
        for ch in content:
            ans += chr(ord(lowerCAmelCase) ^ key)
        return ans

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=0):
        """Decrypt a string; identical to the string encryption (XOR is self-inverse)."""
        assert isinstance(lowerCAmelCase, lowerCAmelCase) and isinstance(lowerCAmelCase, lowerCAmelCase)
        lowerCamelCase_ = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        lowerCamelCase_ = ''''''
        for ch in content:
            ans += chr(ord(lowerCAmelCase) ^ key)
        return ans

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=0):
        """Encrypt a file line by line into ``encrypt.out``; return True on success."""
        assert isinstance(lowerCAmelCase, lowerCAmelCase) and isinstance(lowerCAmelCase, lowerCAmelCase)
        try:
            with open(lowerCAmelCase) as fin, open('''encrypt.out''', '''w+''') as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(lowerCAmelCase, lowerCAmelCase))
        except OSError:
            return False
        return True

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase):
        """Decrypt a file line by line into ``decrypt.out``; return True on success."""
        assert isinstance(lowerCAmelCase, lowerCAmelCase) and isinstance(lowerCAmelCase, lowerCAmelCase)
        try:
            with open(lowerCAmelCase) as fin, open('''decrypt.out''', '''w+''') as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(lowerCAmelCase, lowerCAmelCase))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
75
'''Plot the frequency (gain) and phase response of an audio filter from its
impulse response.  Local names are mangled (assignments to ``lowerCamelCase_``,
reads via ``__snake_case``), so this module documents intent only.'''
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class __UpperCamelCase(lowerCamelCase__):
    def lowercase__(self, lowerCAmelCase):
        """Process one sample; this protocol stub simply returns 0.0."""
        return 0.0


def a_(__snake_case: np.ndarray, __snake_case: int) -> tuple[int | float, int | float]:
    """Return (lowest, highest) dB bounds over the positive-frequency half of the
    FFT, clamped to at most -20 and at least +20 respectively."""
    # DC (index 0) and the bin at nyquist-1 are excluded from the scan
    lowerCamelCase_ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    lowerCamelCase_ = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def a_(__snake_case: FilterType, __snake_case: int) -> None:
    """Plot the filter's gain response (dB vs log-frequency) from its impulse response."""
    lowerCamelCase_ = 512
    # unit impulse followed by zeros
    lowerCamelCase_ = [1] + [0] * (size - 1)
    lowerCamelCase_ = [filter_type.process(__snake_case) for item in inputs]
    lowerCamelCase_ = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    lowerCamelCase_ = np.abs(np.fft.fft(__snake_case))
    # convert magnitude to decibels (NOTE(review): `np.logaa` is mangled log10)
    lowerCamelCase_ = 20 * np.logaa(__snake_case)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')
    # Display within reasonable bounds
    lowerCamelCase_ = get_bounds(__snake_case, __snake_case)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('''Gain (dB)''')
    plt.plot(__snake_case)
    plt.show()


def a_(__snake_case: FilterType, __snake_case: int) -> None:
    """Plot the filter's unwrapped phase response (radians vs log-frequency)."""
    lowerCamelCase_ = 512
    lowerCamelCase_ = [1] + [0] * (size - 1)
    lowerCamelCase_ = [filter_type.process(__snake_case) for item in inputs]
    lowerCamelCase_ = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    lowerCamelCase_ = np.angle(np.fft.fft(__snake_case))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('''Phase shift (Radians)''')
    plt.plot(np.unwrap(__snake_case, -2 * pi))
    plt.show()
75
1
'''Root-mean-square speed of gas molecules from kinetic theory: v_rms = sqrt(3RT/M).'''

# Universal gas constant R, in J/(mol*K).  The original bound this to `a_`,
# which was immediately shadowed by the function definition below, leaving
# `UNIVERSAL_GAS_CONSTANT` undefined at call time.
UNIVERSAL_GAS_CONSTANT = 8.3144598


def a_(temperature: float, molar_mass: float) -> float:
    """Return the RMS speed (m/s) of a gas molecule.

    Args:
        temperature: absolute temperature in kelvin (must be >= 0).
        molar_mass: molar mass in kg/mol (must be > 0).

    Returns:
        sqrt(3 * R * T / M).

    Raises:
        ValueError: on a negative temperature or non-positive molar mass.
        (The original raised bare ``Exception``; ``ValueError`` is a subclass,
        so existing ``except Exception`` callers still work.)
    """
    # The original signature declared the same parameter name twice — a
    # SyntaxError — so real names are restored here.
    if temperature < 0:
        raise ValueError('Temperature cannot be less than 0 K')
    if molar_mass <= 0:
        raise ValueError('Molar mass cannot be less than or equal to 0 kg/mol')
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: nitrogen gas (NOTE: 28 is g/mol; pass kg/mol for physical results)
    temperature = 300
    molar_mass = 28
    vrms = a_(temperature, molar_mass)
    print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
75
'''Tests for the Funnel Transformer tokenizers (slow and fast), driven by the
shared TokenizerTesterMixin harness.  All method names are mangled to
``lowercase__`` — the mixin presumably resolves them by its own naming; verify
against the upstream test file.'''
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class __UpperCamelCase(lowerCamelCase__, unittest.TestCase):
    # class attributes consumed by the shared tokenizer test mixin
    lowercase : Union[str, Any] = FunnelTokenizer
    lowercase : List[str] = FunnelTokenizerFast
    lowercase : Union[str, Any] = True
    lowercase : int = True

    def lowercase__(self):
        """Write a tiny WordPiece vocabulary into the temp dir used by the tests."""
        super().setUp()
        lowerCamelCase_ = [
            '''<unk>''',
            '''<cls>''',
            '''<sep>''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowerCamelCase_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

    def lowercase__(self, **lowerCAmelCase):
        """Instantiate the slow tokenizer from the temp vocabulary."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase)

    def lowercase__(self, **lowerCAmelCase):
        """Instantiate the fast tokenizer from the temp vocabulary."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase)

    def lowercase__(self, lowerCAmelCase):
        """Return an (input, expected-normalized-output) text pair."""
        lowerCamelCase_ = '''UNwant\u00E9d,running'''
        lowerCamelCase_ = '''unwanted, running'''
        return input_text, output_text

    def lowercase__(self):
        """Check basic WordPiece tokenization and token-to-id conversion."""
        lowerCamelCase_ = self.tokenizer_class(self.vocab_file)
        lowerCamelCase_ = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(lowerCAmelCase, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase), [7, 4, 5, 10, 8, 9])

    def lowercase__(self):
        """Funnel marks the leading [CLS] with token type id 2; check single and pair encodings."""
        lowerCamelCase_ = self.get_tokenizers(do_lower_case=lowerCAmelCase)
        for tokenizer in tokenizers:
            lowerCamelCase_ = tokenizer('''UNwant\u00E9d,running''')
            lowerCamelCase_ = len(inputs['''input_ids''']) - 1
            self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len)
            lowerCamelCase_ = tokenizer('''UNwant\u00E9d,running''', '''UNwant\u00E9d,running''')
            self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len + [1] * sentence_len)
75
1
'''SentencePiece-based tokenizer for CPM (Chinese Pre-trained Model) checkpoints,
with jieba pre-segmentation.  NOTE(review): local identifiers are mangled
(assignments to ``lowerCamelCase_``, reads via ``lowerCAmelCase`` or other
undefined names), so the class documents intent only — verify against the
upstream ``tokenization_cpm.py`` before use.'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


a_ : str = logging.get_logger(__name__)
a_ : int = {"""vocab_file""": """spiece.model"""}
a_ : Dict = {
    """vocab_file""": {
        """TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
    }
}


class __UpperCamelCase(lowerCamelCase__):
    def __init__(
        self,
        lowerCAmelCase,
        lowerCAmelCase=False,
        lowerCAmelCase=True,
        lowerCAmelCase=False,
        lowerCAmelCase="<s>",
        lowerCAmelCase="</s>",
        lowerCAmelCase="<unk>",
        lowerCAmelCase="<sep>",
        lowerCAmelCase="<pad>",
        lowerCAmelCase="<cls>",
        lowerCAmelCase="<mask>",
        lowerCAmelCase=["<eop>", "<eod>"],
        lowerCAmelCase=None,
        **lowerCAmelCase,
    ):
        """Load the SentencePiece model and set up jieba-based pre-tokenization."""
        lowerCamelCase_ = AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase, lowerCAmelCase) else mask_token
        lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=lowerCAmelCase,
            remove_space=lowerCAmelCase,
            keep_accents=lowerCAmelCase,
            bos_token=lowerCAmelCase,
            eos_token=lowerCAmelCase,
            unk_token=lowerCAmelCase,
            sep_token=lowerCAmelCase,
            pad_token=lowerCAmelCase,
            cls_token=lowerCAmelCase,
            mask_token=lowerCAmelCase,
            additional_special_tokens=lowerCAmelCase,
            sp_model_kwargs=self.sp_model_kwargs,
            **lowerCAmelCase,
        )
        lowerCamelCase_ = 3
        lowerCamelCase_ = do_lower_case
        lowerCamelCase_ = remove_space
        lowerCamelCase_ = keep_accents
        lowerCamelCase_ = vocab_file
        lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(lowerCAmelCase)
        # jieba is an optional dependency, only needed for this tokenizer
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
                '''See https://pypi.org/project/jieba/ for installation.'''
            )
        lowerCamelCase_ = jieba
        # map space/newline to the CPM placeholder characters
        lowerCamelCase_ = str.maketrans(''' \n''', '''\u2582\u2583''')

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def lowercase__(self):
        """Return the size of the SentencePiece vocabulary."""
        return len(self.sp_model)

    def lowercase__(self):
        """Return the full vocab (token -> id), including added tokens."""
        lowerCamelCase_ = {self.convert_ids_to_tokens(lowerCAmelCase): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        """Drop the unpicklable SentencePiece processor for pickling."""
        lowerCamelCase_ = self.__dict__.copy()
        lowerCamelCase_ = None
        return state

    def __setstate__(self, lowerCAmelCase):
        """Restore state and re-load the SentencePiece model from the vocab file."""
        lowerCamelCase_ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            lowerCamelCase_ = {}
        lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def lowercase__(self, lowerCAmelCase):
        """Normalize raw text (whitespace, quote style, accents, case) before tokenizing."""
        if self.remove_space:
            lowerCamelCase_ = ''' '''.join(inputs.strip().split())
        else:
            lowerCamelCase_ = inputs
        lowerCamelCase_ = outputs.replace('''``''', '''"''').replace('''\'\'''', '''"''')
        if not self.keep_accents:
            lowerCamelCase_ = unicodedata.normalize('''NFKD''', lowerCAmelCase)
            lowerCamelCase_ = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCAmelCase)])
        if self.do_lower_case:
            lowerCamelCase_ = outputs.lower()
        return outputs

    def lowercase__(self, lowerCAmelCase):
        """Tokenize into SentencePiece pieces, re-splitting a trailing digit+comma
        so numbers and punctuation are segmented consistently."""
        lowerCamelCase_ = self.preprocess_text(lowerCAmelCase)
        lowerCamelCase_ = self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase)
        lowerCamelCase_ = []
        for piece in pieces:
            if len(lowerCAmelCase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                lowerCamelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase, ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        lowerCamelCase_ = cur_pieces[1:]
                    else:
                        lowerCamelCase_ = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(lowerCAmelCase)
            else:
                new_pieces.append(lowerCAmelCase)
        return new_pieces

    def lowercase__(self, lowerCAmelCase):
        """Convert a token (str) to an id via the SentencePiece model."""
        return self.sp_model.PieceToId(lowerCAmelCase)

    def lowercase__(self, lowerCAmelCase):
        """Convert an id to a token (str) via the SentencePiece model."""
        return self.sp_model.IdToPiece(lowerCAmelCase)

    def lowercase__(self, lowerCAmelCase):
        """Join pieces back into a string, replacing the SentencePiece underline with spaces."""
        lowerCamelCase_ = ''''''.join(lowerCAmelCase).replace(lowerCAmelCase, ''' ''').strip()
        return out_string

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=None):
        """Add XLNet-style special tokens: A + <sep> [+ B + <sep>] + <cls>."""
        lowerCamelCase_ = [self.sep_token_id]
        lowerCamelCase_ = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=False):
        """Return a mask marking special tokens (1) vs sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCAmelCase, token_ids_a=lowerCAmelCase, already_has_special_tokens=lowerCAmelCase)
        if token_ids_a is not None:
            return ([0] * len(lowerCAmelCase)) + [1] + ([0] * len(lowerCAmelCase)) + [1, 1]
        return ([0] * len(lowerCAmelCase)) + [1, 1]

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=None):
        """Create token type ids; the trailing <cls> gets its own segment id 2 (XLNet style)."""
        lowerCamelCase_ = [self.sep_token_id]
        lowerCamelCase_ = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=None):
        """Save the SentencePiece vocabulary: copy the file if present, otherwise
        serialize the in-memory model."""
        if not os.path.isdir(lowerCAmelCase):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        lowerCamelCase_ = os.path.join(
            lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, lowerCAmelCase)
        elif not os.path.isfile(self.vocab_file):
            with open(lowerCAmelCase, '''wb''') as fi:
                lowerCamelCase_ = self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase)
        return (out_vocab_file,)

    def lowercase__(self, *lowerCAmelCase, **lowerCAmelCase):
        """Decode, then translate the CPM whitespace placeholders back to space/newline."""
        lowerCamelCase_ = super()._decode(*lowerCAmelCase, **lowerCAmelCase)
        lowerCamelCase_ = text.replace(''' ''', '''''').replace('''\u2582''', ''' ''').replace('''\u2583''', '''\n''')
        return text
75
'''Convert a Pix2Struct T5x/Flax checkpoint to the Hugging Face PyTorch format.

NOTE(review): identifiers are mechanically mangled (assignments to
``lowerCamelCase_``, reads via ``__snake_case``), so the script cannot run
as-is; docstrings describe intent only.'''
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints

from transformers import (
    AutoTokenizer,
    PixaStructConfig,
    PixaStructForConditionalGeneration,
    PixaStructImageProcessor,
    PixaStructProcessor,
    PixaStructTextConfig,
    PixaStructVisionConfig,
)


def a_(__snake_case: Any) -> int:
    """Load the T5x checkpoint and return its parameters as a flattened dict."""
    lowerCamelCase_ = checkpoints.load_tax_checkpoint(__snake_case)
    lowerCamelCase_ = flatten_dict(__snake_case)
    return flax_params


def a_(__snake_case: Dict) -> Optional[int]:
    """Rename flattened Flax parameter keys to the HF naming scheme and convert
    the arrays to torch tensors (transposed except for embeddings)."""
    lowerCamelCase_ = {}
    # generic key renames applied to every parameter
    lowerCamelCase_ = {
        '''token_embedder''': '''embeddings''',
        '''encoder_norm''': '''layernorm''',
        '''kernel''': '''weight''',
        '''.out''': '''.output''',
        '''scale''': '''weight''',
        '''embedders_0.pos_embedding''': '''row_embedder.weight''',
        '''embedders_1.pos_embedding''': '''column_embedder.weight''',
    }
    # extra renames only for decoder-side parameters
    lowerCamelCase_ = {
        '''query''': '''attention.query''',
        '''key''': '''attention.key''',
        '''value''': '''attention.value''',
        '''output.dense''': '''output''',
        '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
        '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
        '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
        '''mlp.''': '''mlp.DenseReluDense.''',
        '''pre_mlp_layer_norm''': '''mlp.layer_norm''',
        '''self_attention.o''': '''self_attention.attention.o''',
        '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
        '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
        '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            lowerCamelCase_ = '''.'''.join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                lowerCamelCase_ = new_key.replace(__snake_case, __snake_case)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    lowerCamelCase_ = new_key.replace(__snake_case, __snake_case)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                lowerCamelCase_ = re.sub(r'''layers_(\d+)''', r'''layer.\1''', __snake_case)
                lowerCamelCase_ = new_key.replace('''encoder''', '''encoder.encoder''')
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                lowerCamelCase_ = re.sub(r'''layers_(\d+)''', r'''layer.\1''', __snake_case)
            lowerCamelCase_ = flax_dict[key]
    lowerCamelCase_ = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # dense kernels are stored transposed in Flax relative to torch
            lowerCamelCase_ = torch.from_numpy(converted_dict[key].T)
        else:
            lowerCamelCase_ = torch.from_numpy(converted_dict[key])
    return converted_torch_dict


def a_(__snake_case: Optional[Any], __snake_case: List[str], __snake_case: Any = False, __snake_case: Optional[int] = False) -> Union[str, Any]:
    """Build the HF Pix2Struct model from the converted weights, attach processor
    components, and save everything to the dump folder."""
    lowerCamelCase_ = get_flax_param(__snake_case)
    if not use_large:
        lowerCamelCase_ = PixaStructVisionConfig()
        lowerCamelCase_ = PixaStructTextConfig()
    else:
        lowerCamelCase_ = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        lowerCamelCase_ = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    lowerCamelCase_ = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=__snake_case)
    lowerCamelCase_ = PixaStructForConditionalGeneration(__snake_case)
    lowerCamelCase_ = rename_and_convert_flax_params(__snake_case)
    model.load_state_dict(__snake_case)
    lowerCamelCase_ = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''')
    lowerCamelCase_ = PixaStructImageProcessor()
    lowerCamelCase_ = PixaStructProcessor(image_processor=__snake_case, tokenizer=__snake_case)
    if use_large:
        lowerCamelCase_ = 4096
        lowerCamelCase_ = True
    # mkdir if needed
    os.makedirs(__snake_case, exist_ok=__snake_case)
    model.save_pretrained(__snake_case)
    processor.save_pretrained(__snake_case)
    print('''Model saved in {}'''.format(__snake_case))


if __name__ == "__main__":
    a_ : Optional[int] = argparse.ArgumentParser()
    parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
    parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
    a_ : Tuple = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
75
1
'''Lazy import structure for the UniSpeech model — the standard HF ``__init__.py``
pattern: heavy submodules are only imported on attribute access at runtime,
while type checkers see the real imports.'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# submodule name -> public names it exports; extended conditionally below
a_ : List[str] = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}

# only register the modeling module when torch is installed
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Any = [
        """UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """UniSpeechForCTC""",
        """UniSpeechForPreTraining""",
        """UniSpeechForSequenceClassification""",
        """UniSpeechModel""",
        """UniSpeechPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # static type checkers see the concrete imports...
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    # ...while at runtime the module object is replaced by a lazy loader
    import sys

    a_ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
75
"""CLIP-style image processor: resize -> center-crop -> rescale -> normalize pipeline."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


# NOTE(review): the original declared duplicate `lowerCAmelCase` parameters (a
# SyntaxError) and every method was named `lowercase__`, shadowing its siblings
# while `preprocess` called self.resize/center_crop/rescale/normalize. Names are
# restored from those call sites. Base class restored to the imported
# BaseImageProcessor (the mangled base name was undefined).
class __UpperCamelCase(BaseImageProcessor):
    """Image processor producing `pixel_values` batches for CLIP-family models."""

    # Names of the model inputs this processor produces.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # shortest-edge resize: do not force a square.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals size["shortest_edge"], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
75
1
"""Convert fairseq SEW checkpoints (asapp/sew) into HuggingFace Transformers format."""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

# NOTE(review): the original imported nonexistent `WavaVeca*` classes; the
# in-file config strings "Wav2Vec2FeatureExtractor"/"Wav2Vec2CTCTokenizer"
# ground the restored names.
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF module path ('*' is the layer index).
MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_pointer` addressed by dotted `key`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Walk the fairseq state dict and load every tensor into `hf_model` via MAPPING."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Fine-tuned models wrap the encoder under a `sew.` prefix, except the CTC head.
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
            if not is_used:
                unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one conv-feature-extractor tensor (conv weight/bias or layer norm)."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        # type 0: the convolution itself.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2: the (group/layer) norm — only present on layer 0 with group norm.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    """Build a SEWConfig from a loaded fairseq model's config."""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    # NOTE(review): fs_config.conv_feature_layers is a python-literal string from
    # a trusted local checkpoint; eval kept for fidelity (ast.literal_eval would be safer).
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config


@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Load a fairseq SEW checkpoint, copy its weights into an HF model, and save it."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
75
"""Break a Caesar cipher by chi-squared comparison against English letter frequencies."""
from __future__ import annotations


def a_(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Return (best_shift, its_chi_squared_value, decoded_text).

    Tries every shift of the alphabet, scores each candidate plaintext with a
    chi-squared statistic against `frequencies_dict` (default: English letter
    frequencies), and returns the lowest-scoring candidate. Characters outside
    the alphabet pass through unchanged.
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # shift -> (chi squared statistic, decrypted text)
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    occurrences = decrypted_with_shift.count(letter)
                    expected = frequencies[letter] * occurrences
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    chi_squared_statistic += chi_letter_value

        chi_squared_statistic_values[shift] = (chi_squared_statistic, decrypted_with_shift)

    # Get the most likely cipher by finding the cipher with the smallest chi
    # squared statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key)

    # Get all the data from the most likely cipher (key, decoded message)
    most_likely_cipher_chi_squared_value, decoded_most_likely_cipher = chi_squared_statistic_values[
        most_likely_cipher
    ]

    # Return the data on the most likely shift
    return (most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher)
75
1
"""Next-greater-element: for each item, the first larger item to its right (-1 if none)."""
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
# Expected output of the functions below for `arr` (kept for the benchmark/demo).
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2) reference implementation: scan right for the first larger value."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same O(n^2) scan written with enumerate/slicing instead of index loops."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack solution, iterating right-to-left.

    The stack holds candidates to the right in decreasing order; pop everything
    <= arr[index] so the stack top (if any) is the next greater element.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
75
"""Utilities for downloading community pipeline modules and loading classes from them."""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging

COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """Return all released diffusers versions from PyPI, sorted ascending."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda r: version.Version(r))


def init_hf_modules():
    """Create the dynamic-modules cache dir, make it a package, and add it to sys.path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create (recursively) a sub-package `name` under the dynamic-modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Return the names relatively imported (`import .x` / `from .x import`) by `module_file`."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Return every file transitively reachable from `module_file` via relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Raise ImportError if any top-level package imported by `filename` is missing; return its relative imports."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import `module_path` (under the dynamic-modules cache) and return `class_name` from it."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the single user-defined DiffusionPipeline subclass in `loaded_module`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Download (or locate) `module_file`, copy it plus its relative imports into the
    dynamic-modules cache, and return its path relative to that cache."""
    module_file_or_url = os.path.join(str(pretrained_model_name_or_path), module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif str(pretrained_model_name_or_path).count("/") == 0:
        # Bare name: a community pipeline fetched from the diffusers GitHub repo.
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            pass
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = str(pretrained_model_name_or_path) + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(str(pretrained_model_name_or_path).split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(str(pretrained_model_name_or_path), module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Fetch `module_file` via get_cached_module_file and return `class_name` from it
    (or the single DiffusionPipeline subclass when class_name is None)."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
75
1
"""Convert ViT MAE checkpoints from the original facebookresearch/mae release
to the Hugging Face ``ViTMAEForPreTraining`` format.

The heavy, conversion-only dependencies (requests, PIL, transformers) are
imported inside ``convert_vit_mae_checkpoint`` so that the pure key-renaming
helpers below stay importable without them.
"""
import argparse

import torch


def rename_key(name):
    """Map one original-checkpoint parameter name to its HF equivalent.

    Order matters: ``decoder_blocks`` must be rewritten before the generic
    ``blocks`` rule, and ``decoder_pos_embed`` before ``pos_embed``.
    """
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename every key of *orig_state_dict* in place and split fused qkv
    projections into separate query/key/value tensors.

    Args:
        orig_state_dict: the original checkpoint's state dict (mutated).
        config: object exposing ``hidden_size`` and ``decoder_hidden_size``.

    Returns:
        The converted state dict (the same mutated mapping).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # Fused qkv tensor is stacked [q; k; v] along dim 0 — split it.
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download the checkpoint at *checkpoint_url*, convert it, sanity-check
    the logits on a reference image, and save model + image processor to
    *pytorch_dump_folder_path*.

    Raises:
        AssertionError: if the converted model's logits drift from the
            reference values (atol=1e-4).
    """
    # Imported lazily: only needed for the actual conversion run.
    import requests
    from PIL import Image

    from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor

    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # One processor instance is enough (the original built it twice).
    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # Fixed seed: the MAE forward pass samples a random mask.
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
75
'''Hard-coded descending timestep schedules (999 -> 0) — presumably
pre-computed sampling schedules for a diffusion model; confirm against the
original source.'''
# NOTE(review): every list below is bound to the *same* module-level name
# ``a_``, so each assignment shadows the previous one and only the final list
# is observable after import. Each list was presumably meant to carry its own
# descriptive name (obfuscation artifact) — TODO confirm intended names.

# 27 steps.
a_ : Any = [
    9_99, 8_00, 7_99, 6_00, 5_99, 5_00, 4_00, 3_99, 3_77, 3_55, 3_33, 3_11, 2_88,
    2_66, 2_44, 2_22, 2_00, 1_99, 1_77, 1_55, 1_33, 1_11, 88, 66, 44, 22, 0,
]

# 27 steps, shadows the list above.
a_ : Any = [
    9_99, 9_76, 9_52, 9_28, 9_05, 8_82, 8_58, 8_57, 8_10, 7_62, 7_15, 7_14, 5_72,
    4_29, 4_28, 2_86, 2_85, 2_38, 1_90, 1_43, 1_42, 1_18, 95, 71, 47, 24, 0,
]

# 50 steps.
a_ : Optional[Any] = [
    9_99, 9_88, 9_77, 9_66, 9_55, 9_44, 9_33, 9_22, 9_11, 9_00, 8_99, 8_79, 8_59,
    8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99,
    4_00, 3_99, 3_50, 3_00, 2_99, 2_66, 2_33, 2_00, 1_99, 1_79, 1_59, 1_40, 1_20,
    1_00, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0,
]

# 100 steps.
a_ : str = [
    9_99, 9_95, 9_92, 9_89, 9_85, 9_81, 9_78, 9_75, 9_71, 9_67, 9_64, 9_61, 9_57,
    9_56, 9_51, 9_47, 9_42, 9_37, 9_33, 9_28, 9_23, 9_19, 9_14, 9_13, 9_08, 9_03,
    8_97, 8_92, 8_87, 8_81, 8_76, 8_71, 8_70, 8_64, 8_58, 8_52, 8_46, 8_40, 8_34,
    8_28, 8_27, 8_20, 8_13, 8_06, 7_99, 7_92, 7_85, 7_84, 7_77, 7_70, 7_63, 7_56,
    7_49, 7_42, 7_41, 7_33, 7_24, 7_16, 7_07, 6_99, 6_98, 6_88, 6_77, 6_66, 6_56,
    6_55, 6_45, 6_34, 6_23, 6_13, 6_12, 5_98, 5_84, 5_70, 5_69, 5_55, 5_41, 5_27,
    5_26, 5_05, 4_84, 4_83, 4_62, 4_40, 4_39, 3_96, 3_95, 3_52, 3_51, 3_08, 3_07,
    2_64, 2_63, 2_20, 2_19, 1_76, 1_32, 88, 44, 0,
]

# ~185 steps (densest schedule in this file).
a_ : Optional[int] = [
    9_99, 9_97, 9_95, 9_92, 9_90, 9_88, 9_86, 9_84, 9_81, 9_79, 9_77, 9_75, 9_72,
    9_70, 9_68, 9_66, 9_64, 9_61, 9_59, 9_57, 9_56, 9_54, 9_51, 9_49, 9_46, 9_44,
    9_41, 9_39, 9_36, 9_34, 9_31, 9_29, 9_26, 9_24, 9_21, 9_19, 9_16, 9_14, 9_13,
    9_10, 9_07, 9_05, 9_02, 8_99, 8_96, 8_93, 8_91, 8_88, 8_85, 8_82, 8_79, 8_77,
    8_74, 8_71, 8_70, 8_67, 8_64, 8_61, 8_58, 8_55, 8_52, 8_49, 8_46, 8_43, 8_40,
    8_37, 8_34, 8_31, 8_28, 8_27, 8_24, 8_21, 8_17, 8_14, 8_11, 8_08, 8_04, 8_01,
    7_98, 7_95, 7_91, 7_88, 7_85, 7_84, 7_80, 7_77, 7_74, 7_70, 7_66, 7_63, 7_60,
    7_56, 7_52, 7_49, 7_46, 7_42, 7_41, 7_37, 7_33, 7_30, 7_26, 7_22, 7_18, 7_14,
    7_10, 7_07, 7_03, 6_99, 6_98, 6_94, 6_90, 6_85, 6_81, 6_77, 6_73, 6_69, 6_64,
    6_60, 6_56, 6_55, 6_50, 6_46, 6_41, 6_36, 6_32, 6_27, 6_22, 6_18, 6_13, 6_12,
    6_07, 6_02, 5_96, 5_91, 5_86, 5_80, 5_75, 5_70, 5_69, 5_63, 5_57, 5_51, 5_45,
    5_39, 5_33, 5_27, 5_26, 5_19, 5_12, 5_05, 4_98, 4_91, 4_84, 4_83, 4_74, 4_66,
    4_57, 4_49, 4_40, 4_39, 4_28, 4_18, 4_07, 3_96, 3_95, 3_81, 3_66, 3_52, 3_51,
    3_30, 3_08, 3_07, 2_86, 2_64, 2_63, 2_42, 2_20, 2_19, 1_76, 1_75, 1_32, 1_31,
    88, 44, 0,
]

# 27 steps, coarse tail.
a_ : Dict = [
    9_99, 9_91, 9_82, 9_74, 9_66, 9_58, 9_50, 9_41, 9_33, 9_25, 9_16, 9_08, 9_00,
    8_99, 8_74, 8_50, 8_25, 8_00, 7_99, 7_00, 6_00, 5_00, 4_00, 3_00, 2_00, 1_00, 0,
]

# 40 steps.
a_ : Tuple = [
    9_99, 9_92, 9_85, 9_78, 9_71, 9_64, 9_57, 9_49, 9_42, 9_35, 9_28, 9_21, 9_14,
    9_07, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99,
    6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_00, 2_99, 2_00, 1_99, 1_00, 99, 0,
]

# 100 steps; this final binding is the only one visible to importers.
a_ : Any = [
    9_99, 9_96, 9_92, 9_89, 9_85, 9_82, 9_79, 9_75, 9_72, 9_68, 9_65, 9_61, 9_58,
    9_55, 9_51, 9_48, 9_44, 9_41, 9_38, 9_34, 9_31, 9_27, 9_24, 9_20, 9_17, 9_14,
    9_10, 9_07, 9_03, 9_00, 8_99, 8_91, 8_84, 8_76, 8_69, 8_61, 8_53, 8_46, 8_38,
    8_30, 8_23, 8_15, 8_08, 8_00, 7_99, 7_88, 7_77, 7_66, 7_55, 7_44, 7_33, 7_22,
    7_11, 7_00, 6_99, 6_88, 6_77, 6_66, 6_55, 6_44, 6_33, 6_22, 6_11, 6_00, 5_99,
    5_85, 5_71, 5_57, 5_42, 5_28, 5_14, 5_00, 4_99, 4_85, 4_71, 4_57, 4_42, 4_28,
    4_14, 4_00, 3_99, 3_79, 3_59, 3_40, 3_20, 3_00, 2_99, 2_79, 2_59, 2_40, 2_20,
    2_00, 1_99, 1_66, 1_33, 1_00, 99, 66, 33, 0,
]
75
1
'''Ohm's law: solve for the single unknown among voltage, current and resistance.'''
from __future__ import annotations


def a_(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law (V = I * R) to compute the one quantity passed as 0.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two and returned as a single-entry dict keyed by its name.

    Args:
        voltage: voltage in volts, or 0 if unknown.
        current: current in amperes, or 0 if unknown.
        resistance: resistance in ohms, or 0 if unknown.

    Returns:
        ``{"voltage": ...}``, ``{"current": ...}`` or ``{"resistance": ...}``.

    Raises:
        ValueError: if not exactly one argument is 0, or resistance < 0.
    """
    # Fix for obfuscation defect: the signature previously declared three
    # parameters all named ``__snake_case`` (a SyntaxError) while the body
    # used voltage/current/resistance — the names are restored here.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance < 0:
        raise ValueError('Resistance cannot be negative')
    if voltage == 0:
        return {'voltage': float(current * resistance)}
    if current == 0:
        return {'current': voltage / resistance}
    if resistance == 0:
        return {'resistance': voltage / current}
    # Unreachable: the count(0) check above guarantees one branch matched.
    raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
"""Lazy import structure for the Funnel Transformer model family.

Fixes an obfuscation defect: the import-structure dict was bound to ``a_``
while ``_LazyModule`` was called with the undefined name ``_import_structure``
(a NameError at import time), and the optional tokenizer/model export lists
were bound to throwaway names instead of extending the structure.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Unconditional exports: configuration, conversion script, slow tokenizer.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Fast tokenizer only when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

# PyTorch models only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

# TensorFlow models only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror the structure above for static type checkers.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
1
'''Translation-style feature types backed by PyArrow struct types.

NOTE(review): this module appears mechanically obfuscated — both classes are
named ``__UpperCamelCase`` (the second shadows the first), every dataclass
field is named ``lowercase`` (each annotation shadows the previous), and
several names used in method bodies (``lowerCamelCase__``, ``lang_set``,
``translation_dict``, ``translation_tuples``, ``languages``, ``translations``)
are never defined. The code cannot run as written. The comments below document
the *apparent* intent (datasets' ``Translation`` /
``TranslationVariableLanguages`` features) — confirm against the original.
'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class __UpperCamelCase:
    # Apparent intent: a fixed-language translation feature.
    lowercase : List[str]  # presumably the language codes — TODO confirm
    lowercase : Optional[str] = None  # shadows the field above (obfuscation artifact)
    # Automatically constructed
    lowercase : ClassVar[str] = "dict"
    lowercase : ClassVar[Any] = None
    # NOTE(review): ``lowerCamelCase__`` is undefined — presumably stood for False.
    lowercase : str = field(default='Translation', init=lowerCamelCase__, repr=lowerCamelCase__)

    def __call__(self):
        """Return the PyArrow storage type: a struct with one string field per
        sorted language code.

        NOTE(review): ``self.languages`` does not exist as written — the field
        that should carry it is named ``lowercase`` above.
        """
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )

    def lowercase__(self):
        """Flatten this feature into per-language string Values (apparent
        intent of datasets' ``Translation.flatten``)."""
        from .features import Value

        return {k: Value('''string''' ) for k in sorted(self.languages )}


@dataclass
class __UpperCamelCase:
    # Apparent intent: translations with a variable set of languages per example.
    # NOTE(review): this class definition shadows the class of the same name above.
    lowercase : Optional[List] = None  # presumably allowed language codes — TODO confirm
    lowercase : Optional[int] = None  # shadows the field above
    lowercase : Optional[str] = None  # shadows the field above
    # Automatically constructed
    lowercase : ClassVar[str] = "dict"
    lowercase : ClassVar[Any] = None
    lowercase : str = field(default='TranslationVariableLanguages', init=lowerCamelCase__, repr=lowerCamelCase__)

    def lowercase__(self):
        """Apparent __post_init__: normalize the language list and cache its
        length.

        NOTE(review): both results are assigned to the throwaway local
        ``lowerCamelCase_`` and discarded — attribute targets were lost in
        obfuscation.
        """
        lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
        lowerCamelCase_ = len(self.languages ) if self.languages else None

    def __call__(self):
        """Return the PyArrow storage type: parallel lists of language codes
        and translation strings."""
        return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )

    def lowercase__(self, lowerCAmelCase):
        """Encode one example into parallel ``language``/``translation`` lists.

        NOTE(review): body references undefined names (``lang_set``,
        ``translation_dict``, ``translation_tuples``, ``languages``,
        ``translations``) — the original presumably bound them where
        ``lowerCamelCase_`` now appears.
        """
        lowerCamelCase_ = set(self.languages )
        if self.languages and set(lowerCAmelCase ) - lang_set:
            raise ValueError(
                f'''Some languages in example ({', '.join(sorted(set(lowerCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(lowerCAmelCase )}).''' )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        lowerCamelCase_ = []
        for lang, text in translation_dict.items():
            if isinstance(lowerCAmelCase, lowerCAmelCase ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )

        # Ensure translations are in ascending order by language code.
        lowerCamelCase_, lowerCamelCase_ = zip(*sorted(lowerCAmelCase ) )

        return {"language": languages, "translation": translations}

    def lowercase__(self):
        """Flatten into Sequence-of-string features (apparent intent of
        datasets' ``TranslationVariableLanguages.flatten``)."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('''string''' ) ),
            "translation": Sequence(Value('''string''' ) ),
        }
75
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ ={ '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } lowerCamelCase_ ={ '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16_000, '''return_attention_mask''': False, '''do_normalize''': True, } lowerCamelCase_ =tempfile.mkdtemp() lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase_ =os.path.join(self.tmpdirname, lowerCAmelCase ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '''\n''' ) with open(self.feature_extraction_file, '''w''', 
encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '''\n''' ) # load decoder from hub lowerCamelCase_ ='''hf-internal-testing/ngram-beam-search-decoder''' def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.add_kwargs_tokens_map.copy() kwargs.update(lowerCAmelCase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowerCAmelCase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor, lowerCAmelCase ) # decoder self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, ) self.assertIsInstance(processor.decoder, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), 
decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha, 5.0 ) self.assertEqual(processor.language_model.beta, 3.0 ) self.assertEqual(processor.language_model.score_boundary, -7.0 ) self.assertEqual(processor.language_model.unk_score_offset, 3 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(lowerCAmelCase, '''include''' ): WavaVecaProcessorWithLM( tokenizer=lowerCAmelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =floats_list((3, 1_000) ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ) lowerCamelCase_ =processor(lowerCAmelCase, return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ ='''This is a test string''' lowerCamelCase_ =processor(text=lowerCAmelCase ) lowerCamelCase_ =tokenizer(lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], 
encoded_processor[key] ) def lowercase__ ( self, lowerCAmelCase=(2, 10, 16), lowerCAmelCase=77 ): """simple docstring""" np.random.seed(lowerCAmelCase ) return np.random.rand(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits(shape=(10, 16), seed=13 ) lowerCamelCase_ =processor.decode(lowerCAmelCase ) lowerCamelCase_ =decoder.decode_beams(lowerCAmelCase )[0] self.assertEqual(decoded_decoder[0], decoded_processor.text ) self.assertEqual('''</s> <s> </s>''', decoded_processor.text ) self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: lowerCamelCase_ =processor.batch_decode(lowerCAmelCase ) else: with get_context(lowerCAmelCase ).Pool() as pool: lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =list(lowerCAmelCase ) with get_context('''fork''' ).Pool() as p: lowerCamelCase_ =decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =[], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(lowerCAmelCase, decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text ) self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score ) self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =15 lowerCamelCase_ =-2_0.0 lowerCamelCase_ =-4.0 lowerCamelCase_ =processor.batch_decode( lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, ) lowerCamelCase_ =decoded_processor_out.text lowerCamelCase_ =list(lowerCAmelCase ) with get_context('''fork''' ).Pool() as pool: lowerCamelCase_ =decoder.decode_beams_batch( lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, ) lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out] lowerCamelCase_ =[d[0][2] for d in decoded_decoder_out] lowerCamelCase_ =[d[0][3] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], 
lowerCAmelCase ) self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) ) self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =2.0 lowerCamelCase_ =5.0 lowerCamelCase_ =-2_0.0 lowerCamelCase_ =True lowerCamelCase_ =processor.batch_decode( lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, ) lowerCamelCase_ =decoded_processor_out.text lowerCamelCase_ =list(lowerCAmelCase ) decoder.reset_params( alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, ) with get_context('''fork''' ).Pool() as pool: lowerCamelCase_ =decoder.decode_beams_batch( lowerCAmelCase, lowerCAmelCase, ) lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase ) lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha, 2.0 ) self.assertEqual(lm_model.beta, 5.0 ) self.assertEqual(lm_model.unk_score_offset, -2_0.0 ) self.assertEqual(lm_model.score_boundary, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ 
=processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =snapshot_download('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =os.listdir(lowerCAmelCase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =floats_list((3, 1_000) ) lowerCamelCase_ =processor_wavaveca(lowerCAmelCase, return_tensors='''np''' ) lowerCamelCase_ =processor_auto(lowerCAmelCase, return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =processor_wavaveca.batch_decode(lowerCAmelCase ) lowerCamelCase_ =processor_auto.batch_decode(lowerCAmelCase ) 
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', ) @staticmethod def lowercase__ ( lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[d[key] for d in offsets] return retrieved_list def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =self._get_dummy_logits()[0] lowerCamelCase_ =processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word 
self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowercase__ ( self ): """simple docstring""" import torch lowerCamelCase_ =load_dataset('''common_voice''', '''en''', split='''train''', streaming=lowerCAmelCase ) lowerCamelCase_ =ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) ) lowerCamelCase_ =iter(lowerCAmelCase ) lowerCamelCase_ =next(lowerCAmelCase ) lowerCamelCase_ =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) lowerCamelCase_ =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train lowerCamelCase_ =processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase ).logits.cpu().numpy() lowerCamelCase_ =processor.decode(logits[0], output_word_offsets=lowerCAmelCase ) lowerCamelCase_ =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate lowerCamelCase_ =[ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] lowerCamelCase_ ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS 
TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase ) self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text ) # output times lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) ) lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) ) # fmt: off lowerCamelCase_ =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) lowerCamelCase_ =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) ) self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
75
1
"""Tests for the TensorFlow ConvBERT model and its task heads."""
from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    """Builds tiny ConvBERT configs/inputs and runs shape checks for each head.

    NOTE(review): the constructor accepts the usual tester keyword arguments but
    (as in the original) deliberately overwrites them with fixed small values so
    every test runs on the same tiny model.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # Hard-coded tiny-model hyperparameters (arguments above are ignored on purpose).
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        # ConvBERT-specific settings.
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return a tiny ConvBertConfig plus random input tensors and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the base model output shape for dict, list and tensor inputs."""
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the MLM head emits per-token vocabulary logits."""
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the sequence-classification head emits one logit row per example."""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the multiple-choice head after tiling inputs across choices."""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head emits per-token label logits."""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the QA head emits start/end logits over the sequence."""
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline test-suite entry points for TF ConvBERT."""

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Round-trip each model through SavedModel and verify hidden states/attentions."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                # ConvBERT uses a head ratio of 2, so only half the attention heads appear here.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Verify attention tensors are exposed via both call kwargs and config flags."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the published YituTech/conv-bert-base weights."""

    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
75
"""Tests for the Stable Diffusion InstructPix2Pix pipeline (fast CPU + slow GPU)."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests built on tiny randomly-initialized components."""

    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny UNet/VAE/text-encoder components; seeds fixed for determinism."""
        torch.manual_seed(0)
        # in_channels=8: InstructPix2Pix concatenates image latents to the noise latents.
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return a deterministic prompt/image/generator input dict for `device`."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        # Convert the PIL image back to a batched tensor of two identical images.
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        """Passing pre-encoded latents must match passing the raw image."""
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests against the published timbrooks/instruct-pix2pix checkpoint."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_pipeline_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        """Verify intermediate latents reported through the step callback."""
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
75
1
"""Masked-language-modeling pipeline: predicts tokens for `[MASK]` positions."""
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    """Fill-mask pipeline built on a masked-language model head."""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        """Return the positions of the mask token in `input_ids` for the active framework."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        """Raise a PipelineException if `input_ids` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        """Validate every sequence in `model_inputs` contains a mask token."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        """Tokenize `inputs` and verify each sequence carries a mask token."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model; carry input_ids forward for postprocessing."""
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Convert logits into the top-k (score, token, token_str, sequence) proposals."""
        # Cap top_k if there are fewer targets than requested predictions.
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Map user-supplied target words to vocabulary ids, tokenizing out-of-vocab words."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Split call kwargs into (preprocess, forward, postprocess) parameter dicts."""
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Fill the masked token(s); unwrap the batch dimension for single-item lists."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
75
"""Tests for the TensorFlow XGLM model (tester, common model tests, generation tests).

NOTE(review): identifiers in this file are machine-obfuscated: every method is
named ``lowercase__`` (so later defs shadow earlier ones inside a class), every
local is assigned to ``lowerCamelCase_`` and every parameter is ``lowerCAmelCase``
(duplicate parameter names are a SyntaxError in Python).  Calls such as
``TFXGLMModelTester(self)``, ``self.prepare_config_and_inputs()`` and
``self.get_config()`` therefore do not resolve as written.  This update only
documents the code as-is; restoring the original identifiers must be a
separate, behavior-changing fix.
"""
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class __UpperCamelCase :
    # NOTE(review): `Union`/`Optional` below are not imported; with
    # `from __future__ import annotations` the annotations stay lazy strings,
    # so these class-level assignments do not fail at import time.
    lowercase : Union[str, Any] =XGLMConfig
    lowercase : Optional[Any] ={}
    lowercase : Optional[int] ='gelu'

    def __init__( self, lowerCAmelCase, lowerCAmelCase=14, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=0.0_2, ):
        """Store the tester's hyper-parameters (batch size, sequence length, model dims, ...)."""
        lowerCamelCase_ =parent
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =seq_length
        lowerCamelCase_ =is_training
        lowerCamelCase_ =use_input_mask
        lowerCamelCase_ =use_labels
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =d_model
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =ffn_dim
        lowerCamelCase_ =activation_function
        lowerCamelCase_ =activation_dropout
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =None
        lowerCamelCase_ =0
        lowerCamelCase_ =2
        lowerCamelCase_ =1

    def lowercase__ ( self ):
        """Return the reference configuration of the facebook/xglm-564M checkpoint."""
        return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )

    def lowercase__ ( self ):
        """Build (config, input_ids, input_mask, head_mask) for one test forward pass."""
        # Token ids are clipped into [0, 3] so the tiny vocab is always valid.
        lowerCamelCase_ =tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 )
        lowerCamelCase_ =None
        if self.use_input_mask:
            lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ =self.get_config()
        lowerCamelCase_ =floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def lowercase__ ( self ):
        """Construct an XGLMConfig from the tester's stored hyper-parameters.

        NOTE(review): this def shadows the ``from_pretrained`` variant above
        because both are named ``lowercase__``.
        """
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=lowerCAmelCase,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=lowerCAmelCase,
        )

    def lowercase__ ( self ):
        """Split prepare_config_and_inputs() into (config, inputs_dict) for the common tests."""
        lowerCamelCase_ =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ),
            (
                lowerCamelCase_
            ),
            (
                lowerCamelCase_
            ),
            (
                lowerCamelCase_
            ),
        ) =config_and_inputs
        lowerCamelCase_ ={
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict


@require_tf
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    lowercase : int =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    lowercase : Optional[Any] =(TFXGLMForCausalLM,) if is_tf_available() else ()
    lowercase : Tuple =(
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    lowercase : Optional[Any] =False
    lowercase : Optional[Any] =False
    lowercase : Optional[int] =False

    def lowercase__ ( self ):
        """Instantiate the model tester and the shared ConfigTester."""
        lowerCamelCase_ =TFXGLMModelTester(self )
        lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, n_embd=37 )

    def lowercase__ ( self ):
        """Run the shared configuration sanity tests."""
        self.config_tester.run_common_tests()

    @slow
    def lowercase__ ( self ):
        """Smoke-test that the first published checkpoint can be loaded."""
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =TFXGLMModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )

    @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
    def lowercase__ ( self ):
        """Skipped: embedding resize is pending a refactor upstream."""
        super().test_resize_token_embeddings()


@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    @slow
    def lowercase__ ( self, lowerCAmelCase=True ):
        """Greedy (num_beams=1, no sampling) generation must match a fixed token list."""
        lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        # NOTE(review): `tf.intaa` is the obfuscated form of a TF integer dtype
        # (presumably tf.int64) -- confirm before running.
        lowerCamelCase_ =tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.intaa )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        lowerCamelCase_ =[2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Seeded sampled generation on CPU must reproduce a reference sentence."""
        lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tf.random.set_seed(0 )
        lowerCamelCase_ =tokenizer('''Today is a nice day and''', return_tensors='''tf''' )
        lowerCamelCase_ =tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(''':/CPU:0''' ):
            lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, seed=[7, 0] )
        lowerCamelCase_ =tokenizer.decode(output_ids[0], skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =(
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(lowerCAmelCase, lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Left-padded batched generation must match per-sentence generation."""
        lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ ='''left'''
        # use different length sentences to test batching
        lowerCamelCase_ =[
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When''',
            '''Hello, my dog is a little''',
        ]
        lowerCamelCase_ =tokenizer(lowerCAmelCase, return_tensors='''tf''', padding=lowerCAmelCase )
        lowerCamelCase_ =inputs['''input_ids''']
        lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, attention_mask=inputs['''attention_mask'''], max_new_tokens=12 )
        lowerCamelCase_ =tokenizer(sentences[0], return_tensors='''tf''' ).input_ids
        lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 )
        lowerCamelCase_ =tokenizer(sentences[1], return_tensors='''tf''' ).input_ids
        lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 )
        lowerCamelCase_ =tokenizer.batch_decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =tokenizer.decode(output_non_padded[0], skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =tokenizer.decode(output_padded[0], skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =[
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
            '''a single''',
            '''Hello, my dog is a little bit of a shy one, but he is very friendly''',
        ]
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase, [non_padded_sentence, padded_sentence] )
75
1
"""Lazy-import initializer for the CLIPSeg model package.

Registers each submodule's public symbols in ``_import_structure`` and then
installs a ``_LazyModule`` in ``sys.modules`` so the heavy (torch-backed)
modeling code is only imported on first attribute access.

Fixes over the previous revision: ``_import_structure`` was referenced by
``_LazyModule(...)`` but never defined (the dict had been bound to a throwaway
name), the torch-only modeling symbols were never attached to it, and the lazy
module was bound to a module-level variable instead of being installed into
``sys.modules[__name__]`` -- so lazy importing could never work.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map each submodule to the public names it provides (always importable).
_import_structure = {
    """configuration_clipseg""": [
        """CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """CLIPSegConfig""",
        """CLIPSegTextConfig""",
        """CLIPSegVisionConfig""",
    ],
    """processing_clipseg""": ["""CLIPSegProcessor"""],
}

# The modeling submodule is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_clipseg"""] = [
        """CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CLIPSegModel""",
        """CLIPSegPreTrainedModel""",
        """CLIPSegTextModel""",
        """CLIPSegVisionModel""",
        """CLIPSegForImageSegmentation""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers the
    # real submodule imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
75
"""Tests for the zero-shot object detection pipeline.

NOTE(review): identifiers here are machine-obfuscated: every test method is
named ``lowercase__`` (later defs shadow earlier ones, and unittest only
discovers ``test_``-prefixed names), every local is bound to ``lowerCamelCase_``
and every parameter is ``lowerCAmelCase`` (duplicate parameter names are a
SyntaxError).  References such as ``object_detector`` / ``examples`` therefore
do not resolve as written.  This update documents the file as-is; the expected
score/box literals are preserved byte-for-byte as test fixtures.
"""
import unittest

from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class __UpperCamelCase :
        @staticmethod
        def lowercase__ ( *lowerCAmelCase, **lowerCAmelCase ):
            """No-op placeholder used when the vision dependencies are unavailable."""
            pass


@is_pipeline_test
@require_vision
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    lowercase : int =MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Build the tiny-random pipeline plus one example input for the common pipeline tests."""
        lowerCamelCase_ =pipeline(
            '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        lowerCamelCase_ =[
            {
                '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
            }
        ]
        return object_detector, examples

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """With threshold=0.0 every detection is kept; check the output schema only."""
        lowerCamelCase_ =object_detector(examples[0], threshold=0.0 )
        lowerCamelCase_ =len(lowerCAmelCase )
        self.assertGreater(lowerCAmelCase, 0 )
        self.assertEqual(
            lowerCAmelCase,
            [
                {
                    '''score''': ANY(lowerCAmelCase ),
                    '''label''': ANY(lowerCAmelCase ),
                    '''box''': {'''xmin''': ANY(lowerCAmelCase ), '''ymin''': ANY(lowerCAmelCase ), '''xmax''': ANY(lowerCAmelCase ), '''ymax''': ANY(lowerCAmelCase )},
                }
                for i in range(lowerCAmelCase )
            ],
        )

    @require_tf
    @unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def lowercase__ ( self ):
        """Skipped: no TF implementation of this pipeline."""
        pass

    @require_torch
    def lowercase__ ( self ):
        """Tiny model: single and batched calls at threshold 0.64 must match fixed score/box fixtures."""
        lowerCamelCase_ =pipeline(
            '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        lowerCamelCase_ =object_detector(
            '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
            candidate_labels=['''cat''', '''remote''', '''couch'''],
            threshold=0.6_4,
        )
        self.assertEqual(
            nested_simplify(lowerCAmelCase, decimals=4 ),
            [
                {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
                {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
                {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
                {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
                {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
                {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
                {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
                {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
                {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
            ],
        )
        lowerCamelCase_ =object_detector(
            [
                {
                    '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                }
            ],
            threshold=0.6_4,
        )
        self.assertEqual(
            nested_simplify(lowerCAmelCase, decimals=4 ),
            [
                [
                    {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
                    {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
                    {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
                    {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
                    {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
                    {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
                    {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
                    {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
                    {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def lowercase__ ( self ):
        """Full model (remote image): single and batched calls must match fixed fixtures."""
        lowerCamelCase_ =pipeline('''zero-shot-object-detection''' )
        lowerCamelCase_ =object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            candidate_labels=['''cat''', '''remote''', '''couch'''],
        )
        self.assertEqual(
            nested_simplify(lowerCAmelCase, decimals=4 ),
            [
                {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
                {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
                {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
                {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
                {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
            ],
        )
        lowerCamelCase_ =object_detector(
            [
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
                {
                    '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                    '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(lowerCAmelCase, decimals=4 ),
            [
                [
                    {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
                    {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
                    {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
                    {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
                    {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
                ],
                [
                    {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
                    {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
                    {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
                    {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
                    {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def lowercase__ ( self ):
        """Skipped: no TF implementation of this pipeline."""
        pass

    @require_torch
    @slow
    def lowercase__ ( self ):
        """A 0.2 threshold must keep only the top three detections."""
        lowerCamelCase_ =0.2
        lowerCamelCase_ =pipeline('''zero-shot-object-detection''' )
        lowerCamelCase_ =object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            candidate_labels=['''cat''', '''remote''', '''couch'''],
            threshold=lowerCAmelCase,
        )
        self.assertEqual(
            nested_simplify(lowerCAmelCase, decimals=4 ),
            [
                {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
                {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
                {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
            ],
        )

    @require_torch
    @slow
    def lowercase__ ( self ):
        """top_k=2 must truncate the result list to the two best detections."""
        lowerCamelCase_ =2
        lowerCamelCase_ =pipeline('''zero-shot-object-detection''' )
        lowerCamelCase_ =object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            candidate_labels=['''cat''', '''remote''', '''couch'''],
            top_k=lowerCAmelCase,
        )
        self.assertEqual(
            nested_simplify(lowerCAmelCase, decimals=4 ),
            [
                {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
                {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
            ],
        )
75
1
"""Byte-level BPE tokenizer for the LED model (GPT-2/BART-style).

NOTE(review): this file is machine-obfuscated: every local binds to
``lowerCamelCase_``, every parameter is ``lowerCAmelCase`` (the duplicate
parameter names in ``__init__`` are a SyntaxError), every method is named
``lowercase__`` (later defs shadow earlier ones), and several referenced names
(``bs``, ``cs``, ``n``, ``pairs``, ``self.encoder`` ...) are never actually
bound.  ``json.load(lowerCAmelCase )`` is also applied to the *path* argument
rather than the open file handle.  The code is documented as-is; restoring the
original identifiers must be a separate fix.
"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


a_ : Optional[int] = logging.get_logger(__name__)

a_ : List[str] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}

# See all LED models at https://huggingface.co/models?filter=LED
a_ : Any = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}

a_ : List[str] = {
    """allenai/led-base-16384""": 1_63_84,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def a_ ( ) -> Tuple:
    """Build the byte -> printable-unicode-character table used by byte-level BPE.

    NOTE(review): ``bs``/``cs``/``n`` are read below but the obfuscated
    assignments all land on ``lowerCamelCase_``, so this does not run as written.
    """
    lowerCamelCase_ =(
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    lowerCamelCase_ =bs[:]
    lowerCamelCase_ =0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(__snake_case )
            cs.append(2**8 + n )
            n += 1
    lowerCamelCase_ =[chr(__snake_case ) for n in cs]
    return dict(zip(__snake_case , __snake_case ) )


def a_ ( __snake_case : List[Any] ) -> List[Any]:
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    lowerCamelCase_ =set()
    lowerCamelCase_ =word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        lowerCamelCase_ =char
    return pairs


class __UpperCamelCase ( lowerCamelCase__ ):
    lowercase : Union[str, Any] =VOCAB_FILES_NAMES
    lowercase : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
    lowercase : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase : Any =['input_ids', 'attention_mask']

    def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase="replace", lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="</s>", lowerCAmelCase="<s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<mask>", lowerCAmelCase=False, **lowerCAmelCase, ):
        """Load the vocab and merges files and set up special tokens / BPE state."""
        lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else bos_token
        lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else eos_token
        lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else sep_token
        lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else cls_token
        lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else unk_token
        lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else mask_token
        super().__init__(
            errors=lowerCAmelCase,
            bos_token=lowerCAmelCase,
            eos_token=lowerCAmelCase,
            unk_token=lowerCAmelCase,
            sep_token=lowerCAmelCase,
            cls_token=lowerCAmelCase,
            pad_token=lowerCAmelCase,
            mask_token=lowerCAmelCase,
            add_prefix_space=lowerCAmelCase,
            **lowerCAmelCase,
        )
        # NOTE(review): json.load is given the path argument, not vocab_handle.
        with open(lowerCAmelCase, encoding='''utf-8''' ) as vocab_handle:
            lowerCamelCase_ =json.load(lowerCAmelCase )
        lowerCamelCase_ ={v: k for k, v in self.encoder.items()}
        lowerCamelCase_ =errors  # how to handle errors in decoding
        lowerCamelCase_ =bytes_to_unicode()
        lowerCamelCase_ ={v: k for k, v in self.byte_encoder.items()}
        with open(lowerCAmelCase, encoding='''utf-8''' ) as merges_handle:
            lowerCamelCase_ =merges_handle.read().split('''\n''' )[1:-1]
        lowerCamelCase_ =[tuple(merge.split() ) for merge in bpe_merges]
        lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
        lowerCamelCase_ ={}
        lowerCamelCase_ =add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowerCamelCase_ =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def lowercase__ ( self ):
        """Number of entries in the base vocabulary (excluding added tokens)."""
        return len(self.encoder )

    def lowercase__ ( self ):
        """Return the full vocab: base encoder merged with added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder )

    def lowercase__ ( self, lowerCAmelCase ):
        """Apply BPE merges to one token string, using self.cache / self.bpe_ranks."""
        if token in self.cache:
            return self.cache[token]
        lowerCamelCase_ =tuple(lowerCAmelCase )
        lowerCamelCase_ =get_pairs(lowerCAmelCase )
        if not pairs:
            return token
        while True:
            # Pick the lowest-ranked (earliest learned) merge present in the word.
            lowerCamelCase_ =min(lowerCAmelCase, key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase, float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCamelCase_, lowerCamelCase_ =bigram
            lowerCamelCase_ =[]
            lowerCamelCase_ =0
            while i < len(lowerCAmelCase ):
                try:
                    lowerCamelCase_ =word.index(lowerCAmelCase, lowerCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCamelCase_ =j
                if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCamelCase_ =tuple(lowerCAmelCase )
            lowerCamelCase_ =new_word
            if len(lowerCAmelCase ) == 1:
                break
            else:
                lowerCamelCase_ =get_pairs(lowerCAmelCase )
        lowerCamelCase_ =''' '''.join(lowerCAmelCase )
        lowerCamelCase_ =word
        return word

    def lowercase__ ( self, lowerCAmelCase ):
        """Split text with the GPT-2 pattern, byte-encode each piece, then BPE it."""
        lowerCamelCase_ =[]
        for token in re.findall(self.pat, lowerCAmelCase ):
            lowerCamelCase_ =''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' )
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase ).split(''' ''' ) )
        return bpe_tokens

    def lowercase__ ( self, lowerCAmelCase ):
        """Token string -> id, falling back to the unk token's id."""
        return self.encoder.get(lowerCAmelCase, self.encoder.get(self.unk_token ) )

    def lowercase__ ( self, lowerCAmelCase ):
        """Id -> token string via the reverse vocab."""
        return self.decoder.get(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """Join tokens and undo the byte-level encoding back to text."""
        lowerCamelCase_ =''''''.join(lowerCAmelCase )
        lowerCamelCase_ =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors )
        return text

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Write vocab.json and merges.txt into the given directory; return both paths."""
        if not os.path.isdir(lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCamelCase_ =os.path.join(
            lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCamelCase_ =os.path.join(
            lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCAmelCase, ensure_ascii=lowerCAmelCase ) + '''\n''' )
        lowerCamelCase_ =0
        with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowerCamelCase_ =token_index
                writer.write(''' '''.join(lowerCAmelCase ) + '''\n''' )
                index += 1
        return vocab_file, merge_file

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Add special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCamelCase_ =[self.cls_token_id]
        lowerCamelCase_ =[self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = False ):
        """Return a 0/1 mask marking special-token positions for one or two sequences."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCAmelCase, token_ids_a=lowerCAmelCase, already_has_special_tokens=lowerCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(lowerCAmelCase )) + [1]
        return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase )) + [1]

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Token type ids are all zeros for this tokenizer (single- or pair-input)."""
        lowerCamelCase_ =[self.sep_token_id]
        lowerCamelCase_ =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, **lowerCAmelCase ):
        """Optionally prepend a space before tokenization (add_prefix_space behavior)."""
        lowerCamelCase_ =kwargs.pop('''add_prefix_space''', self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase ) > 0 and not text[0].isspace()):
            lowerCamelCase_ =''' ''' + text
        return (text, kwargs)

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = PaddingStrategy.DO_NOT_PAD, lowerCAmelCase = None, lowerCAmelCase = None, ):
        """Pad as usual, then pad `global_attention_mask` with -1 to match input length."""
        lowerCamelCase_ =super()._pad(
            encoded_inputs=lowerCAmelCase,
            max_length=lowerCAmelCase,
            padding_strategy=lowerCAmelCase,
            pad_to_multiple_of=lowerCAmelCase,
            return_attention_mask=lowerCAmelCase,
        )
        # Load from model defaults
        if return_attention_mask is None:
            lowerCamelCase_ ='''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowerCamelCase_ =encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowerCamelCase_ =len(encoded_inputs['''global_attention_mask'''] ) != len(lowerCAmelCase )
            if needs_to_be_padded:
                lowerCamelCase_ =len(lowerCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowerCamelCase_ =(
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowerCamelCase_ =[-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
"""Word-level BPE tokenizer for the Blenderbot-small (90M) model.

NOTE(review): this file is machine-obfuscated: duplicate ``lowerCAmelCase``
parameter names in ``__init__`` are a SyntaxError, every method is named
``lowercase__`` (later defs shadow earlier ones), and locals such as
``pairs``/``token``/``words`` are read without being bound because all
assignments land on ``lowerCamelCase_``.  Also ``json.load(lowerCAmelCase )``
is applied to the *path* argument rather than the open handle.  The code is
documented as-is; restoring the original identifiers must be a separate fix.
"""
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


a_ : Optional[int] = logging.get_logger(__name__)

a_ : Optional[int] = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}

a_ : List[Any] = {
    """vocab_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
    },
    """tokenizer_config_file""": {
        """facebook/blenderbot_small-90M""": (
            """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
        )
    },
}

a_ : Optional[int] = {"""facebook/blenderbot_small-90M""": 5_12}


def a_ ( __snake_case : List[Any] ) -> Tuple:
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    lowerCamelCase_ =set()
    lowerCamelCase_ =word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        lowerCamelCase_ =char
    lowerCamelCase_ =set(__snake_case )
    return pairs


class __UpperCamelCase ( lowerCamelCase__ ):
    lowercase : Optional[int] =VOCAB_FILES_NAMES
    lowercase : Tuple =PRETRAINED_VOCAB_FILES_MAP
    lowercase : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase : Dict =['input_ids', 'attention_mask']

    def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase="__start__", lowerCAmelCase="__end__", lowerCAmelCase="__unk__", lowerCAmelCase="__null__", **lowerCAmelCase, ):
        """Load the vocab and merges files and build the encoder/decoder/BPE-rank tables."""
        super().__init__(unk_token=lowerCAmelCase, bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, pad_token=lowerCAmelCase, **lowerCAmelCase )
        # NOTE(review): json.load is given the path argument, not vocab_handle.
        with open(lowerCAmelCase, encoding='''utf-8''' ) as vocab_handle:
            lowerCamelCase_ =json.load(lowerCAmelCase )
        lowerCamelCase_ ={v: k for k, v in self.encoder.items()}
        with open(lowerCAmelCase, encoding='''utf-8''' ) as merges_handle:
            lowerCamelCase_ =merges_handle.read().split('''\n''' )[1:-1]
        lowerCamelCase_ =[tuple(merge.split() ) for merge in merges]
        lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
        lowerCamelCase_ ={}

    @property
    def lowercase__ ( self ):
        """Number of entries in the base vocabulary (excluding added tokens)."""
        return len(self.encoder )

    def lowercase__ ( self ):
        """Return the full vocab: base encoder merged with added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder )

    def lowercase__ ( self, lowerCAmelCase ):
        """Normalize punctuation/newlines, lower-case, then BPE-merge each word.

        Output words are joined with '@@ ' continuation markers (</w> suffix
        handling mirrors the original Blenderbot-small BPE).
        """
        if token in self.cache:
            return self.cache[token]
        # Separate punctuation and apostrophes with spaces, collapse runs of
        # whitespace, and encode newlines as the special __newln__ word.
        lowerCamelCase_ =re.sub('''([.,!?()])''', R''' \1''', lowerCAmelCase )
        lowerCamelCase_ =re.sub('''(\')''', R''' \1 ''', lowerCAmelCase )
        lowerCamelCase_ =re.sub(R'''\s{2,}''', ''' ''', lowerCAmelCase )
        if "\n" in token:
            lowerCamelCase_ =token.replace('''\n''', ''' __newln__''' )
        lowerCamelCase_ =token.split(''' ''' )
        lowerCamelCase_ =[]
        for token in tokens:
            if not len(lowerCAmelCase ):
                continue
            lowerCamelCase_ =token.lower()
            lowerCamelCase_ =tuple(lowerCAmelCase )
            lowerCamelCase_ =tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            lowerCamelCase_ =get_pairs(lowerCAmelCase )
            if not pairs:
                words.append(lowerCAmelCase )
                continue
            while True:
                # Pick the lowest-ranked (earliest learned) merge present in the word.
                lowerCamelCase_ =min(lowerCAmelCase, key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase, float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                lowerCamelCase_, lowerCamelCase_ =bigram
                lowerCamelCase_ =[]
                lowerCamelCase_ =0
                while i < len(lowerCAmelCase ):
                    try:
                        lowerCamelCase_ =word.index(lowerCAmelCase, lowerCAmelCase )
                        new_word.extend(word[i:j] )
                        lowerCamelCase_ =j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                lowerCamelCase_ =tuple(lowerCAmelCase )
                lowerCamelCase_ =new_word
                if len(lowerCAmelCase ) == 1:
                    break
                else:
                    lowerCamelCase_ =get_pairs(lowerCAmelCase )
            lowerCamelCase_ ='''@@ '''.join(lowerCAmelCase )
            lowerCamelCase_ =word[:-4]
            lowerCamelCase_ =word
            words.append(lowerCAmelCase )
        return " ".join(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """Split text on whitespace (keeping trailing newlines) and BPE each piece."""
        lowerCamelCase_ =[]
        lowerCamelCase_ =re.findall(R'''\S+\n?''', lowerCAmelCase )
        for token in words:
            split_tokens.extend(list(self.bpe(lowerCAmelCase ).split(''' ''' ) ) )
        return split_tokens

    def lowercase__ ( self, lowerCAmelCase ):
        """Lower-cased token string -> id, falling back to the unk token's id."""
        lowerCamelCase_ =token.lower()
        return self.encoder.get(lowerCAmelCase, self.encoder.get(self.unk_token ) )

    def lowercase__ ( self, lowerCAmelCase ):
        """Id -> token string, falling back to the unk token."""
        return self.decoder.get(lowerCAmelCase, self.unk_token )

    def lowercase__ ( self, lowerCAmelCase ):
        """Join tokens and strip the '@@ ' continuation markers back into words."""
        lowerCamelCase_ =''' '''.join(lowerCAmelCase ).replace('''@@ ''', '''''' ).strip()
        return out_string

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Write vocab.json and merges.txt into the given directory; return both paths."""
        if not os.path.isdir(lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCamelCase_ =os.path.join(
            lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCamelCase_ =os.path.join(
            lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCAmelCase, ensure_ascii=lowerCAmelCase ) + '''\n''' )
        lowerCamelCase_ =0
        with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowerCamelCase_ =token_index
                writer.write(''' '''.join(lowerCAmelCase ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
75
1
'''simple docstring''' import itertools import math def a_ ( __snake_case : int ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def a_ ( ) -> List[str]: """simple docstring""" lowerCamelCase_ =2 while True: if is_prime(__snake_case ): yield num num += 1 def a_ ( __snake_case : int = 1_0001 ) -> int: """simple docstring""" return next(itertools.islice(prime_generator() , nth - 1 , __snake_case ) ) if __name__ == "__main__": print(F"""{solution() = }""")
75
'''simple docstring''' from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Dict = logging.get_logger(__name__) a_ : Any = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] ='efficientformer' def __init__( self, lowerCAmelCase = [3, 2, 6, 4], lowerCAmelCase = [48, 96, 224, 448], lowerCAmelCase = [True, True, True, True], lowerCAmelCase = 448, lowerCAmelCase = 32, lowerCAmelCase = 4, lowerCAmelCase = 7, lowerCAmelCase = 5, lowerCAmelCase = 8, lowerCAmelCase = 4, lowerCAmelCase = 0.0, lowerCAmelCase = 16, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 2, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = 1, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = 1e-5, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_2, lowerCAmelCase = 1e-12, lowerCAmelCase = 224, lowerCAmelCase = 1e-05, **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =hidden_sizes lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =initializer_range lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =patch_size lowerCamelCase_ =num_channels lowerCamelCase_ =depths lowerCamelCase_ =mlp_expansion_ratio lowerCamelCase_ =downsamples lowerCamelCase_ =dim lowerCamelCase_ =key_dim lowerCamelCase_ =attention_ratio lowerCamelCase_ =resolution lowerCamelCase_ =pool_size lowerCamelCase_ =downsample_patch_size lowerCamelCase_ =downsample_stride lowerCamelCase_ =downsample_pad lowerCamelCase_ =drop_path_rate lowerCamelCase_ =num_metaad_blocks lowerCamelCase_ =distillation lowerCamelCase_ =use_layer_scale lowerCamelCase_ =layer_scale_init_value lowerCamelCase_ =image_size lowerCamelCase_ =batch_norm_eps
75
1
'''simple docstring''' import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() a_ : Tuple = logging.get_logger(__name__) a_ : List[Any] = [ ["""attention""", """attn"""], ["""encoder_attention""", """encoder_attn"""], ["""q_lin""", """q_proj"""], ["""k_lin""", """k_proj"""], ["""v_lin""", """v_proj"""], ["""out_lin""", """out_proj"""], ["""norm_embeddings""", """layernorm_embedding"""], ["""position_embeddings""", """embed_positions"""], ["""embeddings""", """embed_tokens"""], ["""ffn.lin""", """fc"""], ] def a_ ( __snake_case : List[str] ) -> Optional[int]: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: lowerCamelCase_ =k.replace(__snake_case , __snake_case ) if k.startswith('''encoder''' ): lowerCamelCase_ =k.replace('''.attn''' , '''.self_attn''' ) lowerCamelCase_ =k.replace('''norm1''' , '''self_attn_layer_norm''' ) lowerCamelCase_ =k.replace('''norm2''' , '''final_layer_norm''' ) elif k.startswith('''decoder''' ): lowerCamelCase_ =k.replace('''norm1''' , '''self_attn_layer_norm''' ) lowerCamelCase_ =k.replace('''norm2''' , '''encoder_attn_layer_norm''' ) lowerCamelCase_ =k.replace('''norm3''' , '''final_layer_norm''' ) return k def a_ ( __snake_case : Optional[Any] ) -> int: """simple docstring""" lowerCamelCase_ =[ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: lowerCamelCase_ =sd.pop(__snake_case ) lowerCamelCase_ =k.replace('''layernorm_embedding''' , '''layer_norm''' ) assert new_k not in sd lowerCamelCase_ =v a_ : Union[str, Any] = ["""START"""] @torch.no_grad() def a_ ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Any ) -> Dict: """simple docstring""" lowerCamelCase_ =torch.load(__snake_case , 
map_location='''cpu''' ) lowerCamelCase_ =model['''model'''] lowerCamelCase_ =BlenderbotConfig.from_json_file(__snake_case ) lowerCamelCase_ =BlenderbotForConditionalGeneration(__snake_case ) lowerCamelCase_ =m.model.state_dict().keys() lowerCamelCase_ =[] lowerCamelCase_ ={} for k, v in sd.items(): if k in IGNORE_KEYS: continue lowerCamelCase_ =rename_state_dict_key(__snake_case ) if new_k not in valid_keys: failures.append([k, new_k] ) else: lowerCamelCase_ =v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(__snake_case ) m.model.load_state_dict(__snake_case , strict=__snake_case ) m.half() m.save_pretrained(__snake_case ) if __name__ == "__main__": a_ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""") parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""") parser.add_argument( """--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use""" ) a_ : Optional[int] = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
75
"""Tests for the Speech2Text feature extractor.

Reconstructed: ``global_rng``, ``floats_list`` and the tester class were
referenced by real names but defined under mangled placeholders (and both
classes shared the name ``__UpperCamelCase``, so the second shadowed the
first); consistent names are restored.
"""
import itertools
import random
import unittest

import numpy as np

from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32-compatible nested list of the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    """Holds shared hyper-parameters and input builders for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        """Assert per-feature mean ~0 and variance ~1 (cepstral normalization)."""
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feat_extract.feature_size)

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # NOTE(review): original dtypes were digit-mangled ("floataa"); restored
        # to the upstream float64-in / float32-out contract — confirm if needed.
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
75
1
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`accelerate test` CLI command.

Reconstructed: ``test_command_parser`` and ``test_command`` were defined
under the placeholder name ``a_`` while being called by their real names in
``main``; the `--config_file` default was also mangled (restored to None).
"""

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    """Build (or attach, when `subparsers` is given) the `test` argument parser."""
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """Launch the bundled smoke-test script through `accelerate-launch`."""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
75
"""Simplified DES (S-DES) implementation over bit strings.

Reconstructed: every helper was defined as ``a_`` while being called as
``apply_table`` / ``left_shift`` / ``xor`` / ``apply_sbox`` / ``function``.
The permutation tables are now module-level constants (previously they were
only bound inside the ``__main__`` section, so ``function`` could not resolve
the P4 table when imported) — behavior when run as a script is unchanged.
"""


def apply_table(inp, table):
    """Permute characters of `inp` according to 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift of the bit string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings ('0'/'1' characters)."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up the 4-bit string `data` in S-box `s` (row = outer bits, col = middle bits)."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


# S-DES permutation tables and S-boxes.
p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
p4_table = [2, 4, 3, 1]
IP = [2, 6, 3, 1, 4, 8, 5, 7]
IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
expansion = [4, 1, 2, 3, 2, 3, 4, 1]
sa = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
sb = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]


def function(expansion, sa, sb, key, message):
    """One Feistel round: expand/XOR/S-box/P4 the right half, XOR into the left."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sb, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741  # left-pad S-box output to 2 bits
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, sa, sb, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves between rounds
    temp = function(expansion, sa, sb, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (subkeys applied in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, sa, sb, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sb, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
75
1
"""GIT model configuration.

Reconstructed: ``GitConfig.__init__`` instantiates ``GitVisionConfig`` by
name, but both classes were defined as ``__UpperCamelCase`` with an undefined
base class; the real names and the ``PretrainedConfig`` base are restored.
"""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model."""

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    """Configuration for a GIT (GenerativeImage2Text) model."""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
75
"""AutoFeatureExtractor factory.

Reconstructed: ``feature_extractor_class_from_name`` and
``get_feature_extractor_config`` were defined as ``a_`` while being called by
their real names; the decorator argument and class name are restored.
"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature-extractor class object from its class name, or None."""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature-extractor configuration dict from a model repo or path.

    Returns an empty dict (with an info log) when no feature-extractor config
    file can be located.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    """Factory that instantiates the correct feature-extractor class from a
    pretrained checkpoint via :meth:`from_pretrained`. Not directly instantiable.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the feature extractor class matching the checkpoint."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new (config class -> feature extractor class) pair."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
75
1
"""Variance-expanding (VE) stochastic scheduler from Karras et al. (2022),
"Elucidating the Design Space of Diffusion-Based Generative Models"
(Algorithm 2: the churn/denoise/correct sampling loop).
"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of a scheduler `step`.

    Attributes:
        prev_sample: denoised sample x_{t-1} for the next diffusion step.
        derivative: d(x)/d(sigma) used by the second-order correction.
        pred_original_sample: model's estimate of the clean sample x_0,
            if computed (may be None).
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampler for the Karras VE formulation.

    NOTE(review): original class/method names were mangled by obfuscation
    (every method was called ``lowercase__``, shadowing one another, and the
    ``__init__`` parameters all shared one name, which is a SyntaxError).
    Names below are restored from what the body itself reads
    (``self.config.sigma_max``, ``self.config.s_min`` ... and the
    ``KarrasVeOutput(prev_sample=..., derivative=..., pred_original_sample=...)``
    constructor calls).
    """

    # This sampler is second-order (uses a correction step).
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # Standard deviation of the initial noise distribution.
        self.init_noise_sigma = sigma_max

        # Setable values, filled in by `set_timesteps`.
        self.num_inference_steps: Optional[int] = None
        self.timesteps: Optional[torch.Tensor] = None
        self.schedule: Optional[torch.Tensor] = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity for this scheduler; kept for cross-scheduler API compatibility."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device, None] = None):
        """Precompute the (descending) timestep indices and the sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # Geometric interpolation between sigma_max and sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like "churn": raise the noise level from sigma to
        sigma_hat = sigma + gamma * sigma and add matching Gaussian noise."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # eps ~ N(0, s_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """First-order (Euler) step from sigma_hat down to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """Second-order (Heun) correction using the trapezoidal average of the
        derivative at sigma_hat and at sigma_prev."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        # Training-time noising is not defined for this inference-only scheduler.
        raise NotImplementedError()
75
'''simple docstring''' import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) a_ : Optional[int] = logging.getLogger(__name__) def a_ ( __snake_case : Union[str, Any] , __snake_case : Union[str, Any] ) -> str: """simple docstring""" lowerCamelCase_ =np.argmax(__snake_case , axis=1 ) return np.sum(outputs == labels ) def a_ ( __snake_case : List[Any] ) -> Tuple: """simple docstring""" with open(__snake_case , encoding='''utf_8''' ) as f: lowerCamelCase_ =csv.reader(__snake_case ) lowerCamelCase_ =[] next(__snake_case ) # skip the first line for line in tqdm(__snake_case ): output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def a_ ( __snake_case : str , __snake_case : Dict , __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Dict ) -> Dict: """simple docstring""" lowerCamelCase_ =[] for dataset in encoded_datasets: lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) lowerCamelCase_ =np.zeros((n_batch, 2) , dtype=np.intaa ) lowerCamelCase_ =np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa ) lowerCamelCase_ =np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(__snake_case ): lowerCamelCase_ =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowerCamelCase_ =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowerCamelCase_ =with_conta lowerCamelCase_ 
=with_conta lowerCamelCase_ =len(__snake_case ) - 1 lowerCamelCase_ =len(__snake_case ) - 1 lowerCamelCase_ =with_conta lowerCamelCase_ =with_conta lowerCamelCase_ =mc_label lowerCamelCase_ =(input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(__snake_case ) for t in all_inputs ) ) return tensor_datasets def a_ ( ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=__snake_case , default='''openai-gpt''' , help='''pretrained model name''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' ) parser.add_argument( '''--output_dir''' , default=__snake_case , type=__snake_case , required=__snake_case , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument('''--train_dataset''' , type=__snake_case , default='''''' ) parser.add_argument('''--eval_dataset''' , type=__snake_case , default='''''' ) parser.add_argument('''--seed''' , type=__snake_case , default=42 ) parser.add_argument('''--num_train_epochs''' , type=__snake_case , default=3 ) parser.add_argument('''--train_batch_size''' , type=__snake_case , default=8 ) parser.add_argument('''--eval_batch_size''' , type=__snake_case , default=16 ) parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__snake_case , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , type=__snake_case , default=1 ) parser.add_argument( '''--max_steps''' , default=-1 , type=__snake_case , help=( '''If > 0: set total number of training steps to perform. 
Override num_train_epochs.''' ) , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=__snake_case , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--learning_rate''' , type=__snake_case , default=6.25e-5 ) parser.add_argument('''--warmup_steps''' , default=0 , type=__snake_case , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--lr_schedule''' , type=__snake_case , default='''warmup_linear''' ) parser.add_argument('''--weight_decay''' , type=__snake_case , default=0.0_1 ) parser.add_argument('''--lm_coef''' , type=__snake_case , default=0.9 ) parser.add_argument('''--n_valid''' , type=__snake_case , default=374 ) parser.add_argument('''--server_ip''' , type=__snake_case , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=__snake_case , default='''''' , help='''Can be used for distant debugging.''' ) lowerCamelCase_ =parser.parse_args() print(__snake_case ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__snake_case ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) lowerCamelCase_ =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) lowerCamelCase_ =torch.cuda.device_count() logger.info('''device: {}, n_gpu {}'''.format(__snake_case , __snake_case ) ) if not args.do_train and not args.do_eval: raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special 
tokens` # These new embeddings will be fine-tuned on the RocStories dataset lowerCamelCase_ =['''_start_''', '''_delimiter_''', '''_classify_'''] lowerCamelCase_ =OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(__snake_case ) lowerCamelCase_ =tokenizer.convert_tokens_to_ids(__snake_case ) lowerCamelCase_ =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(__snake_case ) ) model.to(__snake_case ) # Load and encode the datasets def tokenize_and_encode(__snake_case : Union[str, Any] ): if isinstance(__snake_case , __snake_case ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__snake_case ) ) elif isinstance(__snake_case , __snake_case ): return obj return [tokenize_and_encode(__snake_case ) for o in obj] logger.info('''Encoding dataset...''' ) lowerCamelCase_ =load_rocstories_dataset(args.train_dataset ) lowerCamelCase_ =load_rocstories_dataset(args.eval_dataset ) lowerCamelCase_ =(train_dataset, eval_dataset) lowerCamelCase_ =tokenize_and_encode(__snake_case ) # Compute the max input length for the Transformer lowerCamelCase_ =model.config.n_positions // 2 - 2 lowerCamelCase_ =max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) lowerCamelCase_ =min(__snake_case , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders lowerCamelCase_ =pre_process_datasets(__snake_case , __snake_case , __snake_case , *__snake_case ) lowerCamelCase_, lowerCamelCase_ =tensor_datasets[0], tensor_datasets[1] lowerCamelCase_ =TensorDataset(*__snake_case ) lowerCamelCase_ =RandomSampler(__snake_case ) lowerCamelCase_ =DataLoader(__snake_case , sampler=__snake_case , batch_size=args.train_batch_size ) lowerCamelCase_ =TensorDataset(*__snake_case ) lowerCamelCase_ =SequentialSampler(__snake_case ) lowerCamelCase_ =DataLoader(__snake_case , 
sampler=__snake_case , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: lowerCamelCase_ =args.max_steps lowerCamelCase_ =args.max_steps // (len(__snake_case ) // args.gradient_accumulation_steps) + 1 else: lowerCamelCase_ =len(__snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs lowerCamelCase_ =list(model.named_parameters() ) lowerCamelCase_ =['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight'''] lowerCamelCase_ =[ { '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], '''weight_decay''': args.weight_decay, }, {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0}, ] lowerCamelCase_ =AdamW(__snake_case , lr=args.learning_rate , eps=args.adam_epsilon ) lowerCamelCase_ =get_linear_schedule_with_warmup( __snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=__snake_case ) if args.do_train: lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ): lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =tqdm(__snake_case , desc='''Training''' ) for step, batch in enumerate(__snake_case ): lowerCamelCase_ =tuple(t.to(__snake_case ) for t in batch ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =batch lowerCamelCase_ =model(__snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case ) lowerCamelCase_ =args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() lowerCamelCase_ =( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 lowerCamelCase_ ='''Training loss: {:.2e} lr: {:.2e}'''.format(__snake_case , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer lowerCamelCase_ 
=model.module if hasattr(__snake_case , '''module''' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` lowerCamelCase_ =os.path.join(args.output_dir , __snake_case ) lowerCamelCase_ =os.path.join(args.output_dir , __snake_case ) torch.save(model_to_save.state_dict() , __snake_case ) model_to_save.config.to_json_file(__snake_case ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned lowerCamelCase_ =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) lowerCamelCase_ =OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(__snake_case ) if args.do_eval: model.eval() lowerCamelCase_, lowerCamelCase_ =0, 0 lowerCamelCase_, lowerCamelCase_ =0, 0 for batch in tqdm(__snake_case , desc='''Evaluating''' ): lowerCamelCase_ =tuple(t.to(__snake_case ) for t in batch ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =batch with torch.no_grad(): lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =model( __snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case ) lowerCamelCase_ =mc_logits.detach().cpu().numpy() lowerCamelCase_ =mc_labels.to('''cpu''' ).numpy() lowerCamelCase_ =accuracy(__snake_case , __snake_case ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 lowerCamelCase_ =eval_loss / nb_eval_steps lowerCamelCase_ =eval_accuracy / nb_eval_examples lowerCamelCase_ =tr_loss / nb_tr_steps if args.do_train else None lowerCamelCase_ ={'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss} lowerCamelCase_ =os.path.join(args.output_dir , '''eval_results.txt''' ) with open(__snake_case , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' , __snake_case , str(result[key] ) ) 
writer.write('''%s = %s\n''' % (key, str(result[key] )) ) if __name__ == "__main__": main()
75
1
'''simple docstring''' import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline a_ : Optional[int] = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""") def a_ ( __snake_case : Optional[Any] , __snake_case : tuple , __snake_case : Path , __snake_case : List[Any] , __snake_case : str , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : int=False , ) -> Any: """simple docstring""" output_path.parent.mkdir(parents=__snake_case , exist_ok=__snake_case ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( __snake_case , __snake_case , f=output_path.as_posix() , input_names=__snake_case , output_names=__snake_case , dynamic_axes=__snake_case , do_constant_folding=__snake_case , use_external_data_format=__snake_case , enable_onnx_checker=__snake_case , opset_version=__snake_case , ) else: export( __snake_case , __snake_case , f=output_path.as_posix() , input_names=__snake_case , output_names=__snake_case , dynamic_axes=__snake_case , do_constant_folding=__snake_case , opset_version=__snake_case , ) @torch.no_grad() def a_ ( __snake_case : str , __snake_case : str , __snake_case : int , __snake_case : bool = False ) -> Dict: """simple docstring""" lowerCamelCase_ =torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): lowerCamelCase_ ='''cuda''' elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' ) else: lowerCamelCase_ ='''cpu''' lowerCamelCase_ =StableDiffusionPipeline.from_pretrained(__snake_case , torch_dtype=__snake_case ).to(__snake_case ) lowerCamelCase_ =Path(__snake_case ) # TEXT ENCODER 
lowerCamelCase_ =pipeline.text_encoder.config.max_position_embeddings lowerCamelCase_ =pipeline.text_encoder.config.hidden_size lowerCamelCase_ =pipeline.tokenizer( '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=__snake_case , return_tensors='''pt''' , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=__snake_case , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={ '''input_ids''': {0: '''batch''', 1: '''sequence'''}, } , opset=__snake_case , ) del pipeline.text_encoder # UNET lowerCamelCase_ =pipeline.unet.config.in_channels lowerCamelCase_ =pipeline.unet.config.sample_size lowerCamelCase_ =output_path / '''unet''' / '''model.onnx''' onnx_export( pipeline.unet , model_args=( torch.randn(2 , __snake_case , __snake_case , __snake_case ).to(device=__snake_case , dtype=__snake_case ), torch.randn(2 ).to(device=__snake_case , dtype=__snake_case ), torch.randn(2 , __snake_case , __snake_case ).to(device=__snake_case , dtype=__snake_case ), False, ) , output_path=__snake_case , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''timestep''': {0: '''batch'''}, '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''}, } , opset=__snake_case , use_external_data_format=__snake_case , ) lowerCamelCase_ =str(unet_path.absolute().as_posix() ) lowerCamelCase_ =os.path.dirname(__snake_case ) lowerCamelCase_ =onnx.load(__snake_case ) # clean up existing tensor files shutil.rmtree(__snake_case ) os.mkdir(__snake_case ) # collate external tensor files into one onnx.save_model( __snake_case , __snake_case , save_as_external_data=__snake_case , 
all_tensors_to_one_file=__snake_case , location='''weights.pb''' , convert_attribute=__snake_case , ) del pipeline.unet # VAE ENCODER lowerCamelCase_ =pipeline.vae lowerCamelCase_ =vae_encoder.config.in_channels lowerCamelCase_ =vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder lowerCamelCase_ =lambda __snake_case , __snake_case : vae_encoder.encode(__snake_case , __snake_case )[0].sample() onnx_export( __snake_case , model_args=( torch.randn(1 , __snake_case , __snake_case , __snake_case ).to(device=__snake_case , dtype=__snake_case ), False, ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=__snake_case , ) # VAE DECODER lowerCamelCase_ =pipeline.vae lowerCamelCase_ =vae_decoder.config.latent_channels lowerCamelCase_ =vae_decoder.config.out_channels # forward only through the decoder part lowerCamelCase_ =vae_encoder.decode onnx_export( __snake_case , model_args=( torch.randn(1 , __snake_case , __snake_case , __snake_case ).to(device=__snake_case , dtype=__snake_case ), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=__snake_case , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: lowerCamelCase_ =pipeline.safety_checker lowerCamelCase_ =safety_checker.config.vision_config.num_channels lowerCamelCase_ =safety_checker.config.vision_config.image_size lowerCamelCase_ =safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , __snake_case , __snake_case , __snake_case , ).to(device=__snake_case , 
dtype=__snake_case ), torch.randn(1 , __snake_case , __snake_case , __snake_case ).to(device=__snake_case , dtype=__snake_case ), ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={ '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''}, } , opset=__snake_case , ) del pipeline.safety_checker lowerCamelCase_ =OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' ) lowerCamelCase_ =pipeline.feature_extractor else: lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=__snake_case , feature_extractor=__snake_case , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(__snake_case ) print('''ONNX pipeline saved to''' , __snake_case ) del pipeline del onnx_pipeline lowerCamelCase_ =OnnxStableDiffusionPipeline.from_pretrained(__snake_case , provider='''CPUExecutionProvider''' ) print('''ONNX pipeline is loadable''' ) if __name__ == "__main__": a_ : List[Any] = argparse.ArgumentParser() parser.add_argument( """--model_path""", type=str, required=True, help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""", ) parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--opset""", default=14, type=int, help="""The version of the ONNX operator 
set to use.""", ) parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""") a_ : int = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
75
'''simple docstring''' import copy import os import cva import numpy as np from matplotlib import pyplot as plt class __UpperCamelCase : def __init__( self ): """simple docstring""" lowerCamelCase_ ='''''' lowerCamelCase_ ='''''' lowerCamelCase_ =[] lowerCamelCase_ =0 lowerCamelCase_ =256 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =cva.imread(lowerCAmelCase, 0 ) lowerCamelCase_ =copy.deepcopy(self.img ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =plt.hist(self.img.ravel(), 256, [0, 256], label='''x''' ) lowerCamelCase_ =np.sum(lowerCAmelCase ) for i in range(len(lowerCAmelCase ) ): lowerCamelCase_ =x[i] / self.k self.sk += prk lowerCamelCase_ =(self.L - 1) * self.sk if self.rem != 0: lowerCamelCase_ =int(last % last ) lowerCamelCase_ =int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(lowerCAmelCase ) lowerCamelCase_ =int(np.ma.count(self.img ) / self.img[1].size ) lowerCamelCase_ =self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCamelCase_ =self.img[j][i] if num != self.last_list[num]: lowerCamelCase_ =self.last_list[num] cva.imwrite('''output_data/output.jpg''', self.img ) def lowercase__ ( self ): """simple docstring""" plt.hist(self.img.ravel(), 256, [0, 256] ) def lowercase__ ( self ): """simple docstring""" cva.imshow('''Output-Image''', self.img ) cva.imshow('''Input-Image''', self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": a_ : str = os.path.join(os.path.basename(__file__), """image_data/input.jpg""") a_ : Optional[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
75
1
"""Break a Caesar cipher by chi-squared letter-frequency analysis."""
from __future__ import annotations


def a_(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Decrypt a Caesar cipher by trying every shift and scoring each
    candidate plaintext with Pearson's chi-squared statistic against English
    letter frequencies; the lowest score wins.

    Args:
        ciphertext: the encrypted message.
        cipher_alphabet: alphabet to shift over; defaults to a-z.
        frequencies_dict: expected letter frequencies; defaults to English.
        case_sensitive: if True, preserve the case of letters when decoding.

    Returns:
        (best_shift, chi_squared_value_of_best_shift, decoded_message).

    Note: the obfuscated source gave all four parameters the same name (a
    SyntaxError) and lost its local-variable names; both are restored from
    the names the body itself reads (``ciphertext``, ``cipher_alphabet``,
    ``frequencies_dict``, ``case_sensitive``, ``alphabet_letters``,
    ``frequencies``, ``decrypted_with_shift`` ...).
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values, keyed by shift
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Observed occurrences of this letter in the candidate text
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Expected occurrences based on English letter frequencies
                    expected = frequencies[letter] * occurrences
                    # chi^2 term: (observed - expected)^2 / expected
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    occurrences = decrypted_with_shift.count(letter)
                    expected = frequencies[letter] * occurrences
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    chi_squared_statistic += chi_letter_value

        # Record (score, decoded text) for this shift
        chi_squared_statistic_values[shift] = (chi_squared_statistic, decrypted_with_shift)

    # Get the most likely cipher by finding the cipher with the smallest chi
    # squared statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
75
"""Zero-shot audio classification pipeline (CLAP-style audio/text matching)."""
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: scores an audio clip against a set of
    free-text candidate labels using an audio-text model.

    Inputs accepted by ``__call__``: an http(s) URL, a local file path, raw bytes,
    or a 1-D ``np.ndarray`` waveform.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            # Only PyTorch checkpoints are supported for this pipeline.
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        """Classify ``audios`` against ``candidate_labels`` (passed via kwargs)."""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Route user kwargs into the (preprocess, forward, postprocess) buckets.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            # Decode compressed audio (mp3/flac/...) to a waveform at the model's rate.
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        # One hypothesis sentence per candidate label, e.g. "This is a sound of a dog."
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            # Softmax over the candidate-label axis yields one probability per label.
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        # Sort by descending score so the best label comes first.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
75
1
"""Deprecation helper used to phase out arguments/attributes across versions."""
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Warn about (and extract) deprecated arguments or attributes.

    Each positional arg is a ``(attribute, version_name, message)`` tuple. If
    ``take_from`` is a kwargs dict, the deprecated key is popped and its value
    returned; if it is an object, the attribute value is returned instead.

    Raises:
        ValueError: if the library version is already >= ``version_name`` (the
            deprecation should have been removed).
        TypeError: if unexpected keys remain in the ``take_from`` dict.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        # Allow calling with a single bare triple instead of a tuple of triples.
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            # standard_warn=False lets callers supply the full message themselves.
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        # Any keys left over were never declared deprecated: surface them as a
        # TypeError that points at the *caller's* frame, mimicking Python's own
        # "unexpected keyword argument" error.
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
75
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : str = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp a_ : Optional[Any] = 5 a_ : str = 10 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : int =SpeechaTextTokenizer lowercase : int =False lowercase : List[str] =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =sp.SentencePieceProcessor() spm_model.Load(lowerCAmelCase ) lowerCamelCase_ =['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCAmelCase ) )] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ =Path(self.tmpdirname ) save_json(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''<pad>''' lowerCamelCase_ =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ), lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ), lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =list(self.get_tokenizer().get_vocab().keys() ) 
self.assertEqual(vocab_keys[0], '''<s>''' ) self.assertEqual(vocab_keys[1], '''<pad>''' ) self.assertEqual(vocab_keys[-1], '''j''' ) self.assertEqual(len(lowerCAmelCase ), 1_001 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size, 1_001 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCamelCase_ =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [289, 50, 14, 174, 386], ) lowerCamelCase_ =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], ) lowerCamelCase_ =tokenizer.convert_tokens_to_ids(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCamelCase_ =tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 
142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase, model_name='''facebook/s2t-small-mustc-en-de-st''', revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''', ) @require_sentencepiece class __UpperCamelCase ( 
unittest.TestCase ): lowercase : Tuple ='valhalla/s2t_mustc_multilinguial_medium' lowercase : Dict ='C\'est trop cool' lowercase : str ='Esto es genial' @classmethod def lowercase__ ( cls ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 11 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size, 10_000 ) def lowercase__ ( self ): """simple docstring""" self.assertIn(lowerCAmelCase, self.tokenizer.all_special_ids ) lowerCamelCase_ =[ES_CODE, 4, 1_601, 47, 7_647, 2] lowerCamelCase_ =self.tokenizer.decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' lowerCamelCase_ =self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0], lowerCAmelCase ) self.assertEqual(encoded[-1], self.tokenizer.eos_token_id ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE] ) lowerCamelCase_ ='''es''' self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE] )
75
1
"""Lazy-import definition for the Funnel Transformer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Each optional backend extends the structure only when its dependency is present.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; runtime gets the lazy module.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def a_ ( ) -> Dict: """simple docstring""" lowerCamelCase_ ='''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg''' lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' ) return image def a_ ( __snake_case : Tuple ) -> Dict: """simple docstring""" lowerCamelCase_ =[] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): 
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') ) # fmt: on return rename_keys def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ =dct.pop(__snake_case ) lowerCamelCase_ =val def a_ ( __snake_case : str , __snake_case : Optional[Any] ) -> Any: """simple docstring""" for i in 
range(config.vision_config.num_hidden_layers ): # read in original q and v biases lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict lowerCamelCase_ =torch.cat((q_bias, torch.zeros_like(__snake_case , requires_grad=__snake_case ), v_bias) ) lowerCamelCase_ =qkv_bias def a_ ( __snake_case : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ =364 if '''coco''' in model_name else 224 lowerCamelCase_ =InstructBlipVisionConfig(image_size=__snake_case ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2001 ).to_dict() elif "vicuna-13b" in model_name: lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2001 ).to_dict() else: raise ValueError('''Model name not supported''' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 lowerCamelCase_ =InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict() lowerCamelCase_ =InstructBlipConfig(vision_config=__snake_case , text_config=__snake_case , qformer_config=__snake_case ) return config, image_size @torch.no_grad() def a_ ( __snake_case : Optional[int] , __snake_case : str=None , __snake_case : List[Any]=False ) -> List[str]: """simple docstring""" lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' ) 
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} ) if "t5" in model_name: lowerCamelCase_ =TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) lowerCamelCase_ =LlamaTokenizerFast.from_pretrained( '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' ) tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} ) lowerCamelCase_, lowerCamelCase_ =get_blipa_config(__snake_case ) lowerCamelCase_ =InstructBlipForConditionalGeneration(__snake_case ).eval() lowerCamelCase_ ={ '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''), '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''), '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''), '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''), } lowerCamelCase_, lowerCamelCase_ =model_name_to_original[model_name] # load original model print('''Loading original model...''' ) lowerCamelCase_ ='''cuda:1''' if torch.cuda.is_available() else '''cpu''' lowerCamelCase_ ='''cuda:2''' if torch.cuda.is_available() else '''cpu''' lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =load_model_and_preprocess( name=__snake_case , model_type=__snake_case , is_eval=__snake_case , device=__snake_case ) original_model.eval() print('''Done!''' ) # update state dict keys lowerCamelCase_ =original_model.state_dict() lowerCamelCase_ =create_rename_keys(__snake_case ) for src, dest in rename_keys: rename_key(__snake_case , __snake_case , __snake_case ) # some 
keys can be renamed efficiently for key, val in state_dict.copy().items(): lowerCamelCase_ =state_dict.pop(__snake_case ) if key.startswith('''Qformer.bert''' ): lowerCamelCase_ =key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: lowerCamelCase_ =key.replace('''self''' , '''attention''' ) if "llm_proj" in key: lowerCamelCase_ =key.replace('''llm_proj''' , '''language_projection''' ) if "t5_proj" in key: lowerCamelCase_ =key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''llm_model''' ): lowerCamelCase_ =key.replace('''llm_model''' , '''language_model''' ) if key.startswith('''t5''' ): lowerCamelCase_ =key.replace('''t5''' , '''language''' ) lowerCamelCase_ =val # read in qv biases read_in_q_v_bias(__snake_case , __snake_case ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__snake_case , strict=__snake_case ) lowerCamelCase_ =load_demo_image() lowerCamelCase_ ='''What is unusual about this image?''' # create processor lowerCamelCase_ =BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=__snake_case , image_std=__snake_case ) lowerCamelCase_ =InstructBlipProcessor( image_processor=__snake_case , tokenizer=__snake_case , qformer_tokenizer=__snake_case , ) lowerCamelCase_ =processor(images=__snake_case , text=__snake_case , return_tensors='''pt''' ).to(__snake_case ) # make sure processor creates exact same pixel values lowerCamelCase_ =vis_processors['''eval'''](__snake_case ).unsqueeze(0 ).to(__snake_case ) lowerCamelCase_ =inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __snake_case ) original_model.to(__snake_case ) hf_model.to(__snake_case ) with torch.no_grad(): if "vicuna" in model_name: lowerCamelCase_ =original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits lowerCamelCase_ =hf_model(**__snake_case ).logits else: lowerCamelCase_ =original_model( {'''image''': 
original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits lowerCamelCase_ =tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(__snake_case ) lowerCamelCase_ =label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) lowerCamelCase_ =hf_model(**__snake_case , labels=__snake_case ).logits print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape lowerCamelCase_ =1e-4 if '''vicuna''' in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __snake_case , atol=__snake_case ) print('''Looks ok!''' ) print('''Generating with original model...''' ) lowerCamelCase_ =original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('''Generating with HF model...''' ) lowerCamelCase_ =hf_model.generate( **__snake_case , do_sample=__snake_case , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
lowerCamelCase_ =2 print('''Original generation:''' , __snake_case ) lowerCamelCase_ =processor.batch_decode(__snake_case , skip_special_tokens=__snake_case ) lowerCamelCase_ =[text.strip() for text in output_text] print('''HF generation:''' , __snake_case ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__snake_case ) hf_model.save_pretrained(__snake_case ) if push_to_hub: processor.push_to_hub(F'''Salesforce/{model_name}''' ) hf_model.push_to_hub(F'''Salesforce/{model_name}''' ) if __name__ == "__main__": a_ : Any = argparse.ArgumentParser() a_ : Any = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) a_ : str = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
75
1
'''simple docstring''' # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def a_ ( __snake_case : List[str] , __snake_case : int , __snake_case : Any , __snake_case : int ) -> int: """simple docstring""" lowerCamelCase_ ={ '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''', } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCamelCase_ ={ '''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2], '''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1], '''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5], } lowerCamelCase_ =F'''{src_lang}-{tgt_lang}''' lowerCamelCase_ =F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). 
All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. 
The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=__snake_case , exist_ok=__snake_case ) lowerCamelCase_ =os.path.join(__snake_case , '''README.md''' ) print(F'''Generating {path}''' ) with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(__snake_case ) # make sure we are under the root of the project a_ : List[Any] = Path(__file__).resolve().parent.parent.parent a_ : List[Any] = repo_dir / """model_cards""" for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: a_ : Dict = model_cards_dir / """allenai""" / model_name write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
75
'''Plot the frequency (gain) and phase response of an audio filter.

NOTE(review): identifiers in this copy are machine-mangled (`a_`,
`__snake_case`, `lowerCamelCase_`); duplicate `__snake_case` parameters and
references such as `fft_results`, `samplerate`, `bounds`, `np.logaa`
(presumably `np.log10`) do not resolve as written -- restore the original
names before running. Comments below describe the intended behavior.
'''
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class __UpperCamelCase ( lowerCamelCase__ ):
    # Stand-in for the filter interface: anything exposing
    # ``process(sample) -> float``. The default implementation is silent.
    def lowercase__ ( self, lowerCAmelCase ):
        """Process one audio sample; this default returns silence (0.0)."""
        return 0.0


def a_ ( __snake_case : np.ndarray , __snake_case : int ) -> tuple[int | float, int | float]:
    """Return (lowest, highest) y-axis bounds for an FFT magnitude plot.

    The DC bin and the mirror half of the spectrum are excluded
    (slice ``1 : samplerate // 2 - 1``); bounds are clamped so the plot
    always spans at least [-20, 20].
    """
    lowerCamelCase_ =min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    lowerCamelCase_ =max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest


def a_ ( __snake_case : FilterType , __snake_case : int ) -> None:
    """Show the gain (dB) of the filter vs. frequency.

    Feeds a 512-sample unit impulse through the filter, zero-pads the
    response to one second of audio and plots |FFT| in dB on a log
    frequency axis from 24 Hz up to the Nyquist frequency.
    """
    lowerCamelCase_ =512
    # Unit impulse: [1, 0, 0, ...]; the filter's output is its impulse response.
    lowerCamelCase_ =[1] + [0] * (size - 1)
    lowerCamelCase_ =[filter_type.process(__snake_case ) for item in inputs]
    lowerCamelCase_ =[0] * (samplerate - size)  # zero-padding
    outputs += filler
    lowerCamelCase_ =np.abs(np.fft.fft(__snake_case ) )
    # Convert magnitude to decibels. NOTE(review): `np.logaa` is mangled --
    # presumably np.log10.
    lowerCamelCase_ =20 * np.logaa(__snake_case )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    # Display within reasonable bounds
    lowerCamelCase_ =get_bounds(__snake_case , __snake_case )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('''Gain (dB)''' )
    plt.plot(__snake_case )
    plt.show()


def a_ ( __snake_case : FilterType , __snake_case : int ) -> None:
    """Show the phase response (radians) of the filter vs. frequency.

    Same impulse-response construction as the gain plot, but plots the
    unwrapped phase of the FFT instead of its magnitude.
    """
    lowerCamelCase_ =512
    lowerCamelCase_ =[1] + [0] * (size - 1)
    lowerCamelCase_ =[filter_type.process(__snake_case ) for item in inputs]
    lowerCamelCase_ =[0] * (samplerate - size)  # zero-padding
    outputs += filler
    lowerCamelCase_ =np.angle(np.fft.fft(__snake_case ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('''Phase shift (Radians)''' )
    # Unwrap to avoid artificial +/- pi jumps in the plotted phase.
    plt.plot(np.unwrap(__snake_case , -2 * pi ) )
    plt.show()
75
1
'''Check whether a non-negative integer is a perfect square.'''
import math


def a_ ( __snake_case : int ) -> bool:
    """Return True if ``__snake_case`` is a perfect square.

    Fixes two defects in the previous version: it referenced an undefined
    name (``num``), and it compared ``math.sqrt(n) * math.sqrt(n) == n``,
    which is unreliable for large integers (float rounding) and raises
    ValueError for negative input. ``math.isqrt`` is exact for arbitrarily
    large ints.
    """
    if __snake_case < 0:
        return False
    return math.isqrt(__snake_case ) ** 2 == __snake_case


def a_ ( __snake_case : int ) -> bool:  # noqa: F811 -- second definition kept to mirror the original file layout
    """Binary-search variant of the perfect-square test.

    Searches ``mid`` in [0, n] such that ``mid * mid == n``; O(log n).
    Negative input yields an empty search range and returns False.
    The previous body referenced undefined names (``left``, ``right``,
    ``mid``, ``n``) left over from identifier mangling; repaired here.
    """
    left = 0
    right = __snake_case
    while left <= right:
        mid = (left + right) // 2
        square = mid * mid
        if square == __snake_case:
            return True
        if square > __snake_case:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
'''Unit tests for the Funnel Transformer tokenizers (slow and fast).

NOTE(review): identifiers are machine-mangled -- the mixin base class
(`lowerCamelCase__`), all test method names (`lowercase__`, which shadow
each other in a real class body) and locals (`lowerCamelCase_`) should be
restored before running.
'''
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
    # Tokenizer classes under test plus flags consumed by the shared
    # tokenizer test mixin (presumably test_rust_tokenizer / space handling).
    lowercase : Union[str, Any] =FunnelTokenizer
    lowercase : List[str] =FunnelTokenizerFast
    lowercase : Union[str, Any] =True
    lowercase : int =True

    def lowercase__ ( self ):
        """Write a tiny WordPiece vocabulary into the test temp directory."""
        super().setUp()
        lowerCamelCase_ =[
            '''<unk>''',
            '''<cls>''',
            '''<sep>''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def lowercase__ ( self, **lowerCAmelCase ):
        """Instantiate the slow tokenizer from the temp vocabulary."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase )

    def lowercase__ ( self, **lowerCAmelCase ):
        """Instantiate the fast tokenizer from the temp vocabulary."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """Return an (input_text, expected_output_text) pair for round-trip tests."""
        lowerCamelCase_ ='''UNwant\u00E9d,running'''
        lowerCamelCase_ ='''unwanted, running'''
        return input_text, output_text

    def lowercase__ ( self ):
        """Tokenization must split into the expected WordPieces and ids."""
        lowerCamelCase_ =self.tokenizer_class(self.vocab_file )
        lowerCamelCase_ =tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(lowerCAmelCase, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        # Ids 7, 4, 5, 10, 8, 9 follow from the vocab order written in setUp.
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [7, 4, 5, 10, 8, 9] )

    def lowercase__ ( self ):
        """Funnel token_type_ids: 2 for <cls>, 0 for sentence A, 1 for sentence B.

        NOTE(review): ``do_lower_case=lowerCAmelCase`` references an unbound
        name here -- presumably a boolean in the unmangled source.
        """
        lowerCamelCase_ =self.get_tokenizers(do_lower_case=lowerCAmelCase )
        for tokenizer in tokenizers:
            lowerCamelCase_ =tokenizer('''UNwant\u00E9d,running''' )
            lowerCamelCase_ =len(inputs['''input_ids'''] ) - 1
            self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len )

            lowerCamelCase_ =tokenizer('''UNwant\u00E9d,running''', '''UNwant\u00E9d,running''' )
            self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len + [1] * sentence_len )
75
1
'''Lazy import structure for the GPT-Neo model family.

Declares which symbols exist for each optional backend (PyTorch, Flax) and
defers the actual imports via ``_LazyModule`` until an attribute is first
accessed.

NOTE(review): the module-level names were mangled to ``a_`` -- the dict /
lists below are presumably ``_import_structure`` entries, which the final
``_LazyModule(...)`` call still references by that original name.
'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Base structure: the configuration is importable regardless of backend.
a_ : Optional[int] = {
    """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}

# PyTorch-only symbols: registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : str = [
        """GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTNeoForCausalLM""",
        """GPTNeoForQuestionAnswering""",
        """GPTNeoForSequenceClassification""",
        """GPTNeoForTokenClassification""",
        """GPTNeoModel""",
        """GPTNeoPreTrainedModel""",
        """load_tf_weights_in_gpt_neo""",
    ]

# Flax-only symbols: registered only when flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[Any] = [
        """FlaxGPTNeoForCausalLM""",
        """FlaxGPTNeoModel""",
        """FlaxGPTNeoPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are lazy.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Replace this module in sys.modules with a lazy proxy.
    a_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
75
'''Convert a T5X/Flax Pix2Struct checkpoint into the HF PyTorch format.

NOTE(review): identifiers are machine-mangled. Locals all collapse onto
``lowerCamelCase_`` while later lines read the original names
(``flax_dict``, ``CONVERSION_MAPPING``, ``DECODER_CONVERSION_MAPPING``,
``model``, ``args`` ...), and function parameters repeat ``__snake_case``
(a SyntaxError). ``from tax import checkpoints`` is presumably ``t5x``.
Restore the original names before running; comments describe intent.
'''
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints

from transformers import (
    AutoTokenizer,
    PixaStructConfig,
    PixaStructForConditionalGeneration,
    PixaStructImageProcessor,
    PixaStructProcessor,
    PixaStructTextConfig,
    PixaStructVisionConfig,
)


def a_ ( __snake_case : Any ) -> int:
    """Load a T5X checkpoint and return its parameters as a flat dict."""
    lowerCamelCase_ =checkpoints.load_tax_checkpoint(__snake_case )
    lowerCamelCase_ =flatten_dict(__snake_case )
    return flax_params


def a_ ( __snake_case : Dict ) -> Optional[int]:
    """Rename flax parameter keys to HF names and convert values to torch tensors.

    Two mapping tables: a generic one applied to every key, and a
    decoder-specific one applied only to keys containing "decoder".
    """
    lowerCamelCase_ ={}
    # Generic flax-name -> HF-name substring substitutions.
    lowerCamelCase_ ={
        '''token_embedder''': '''embeddings''',
        '''encoder_norm''': '''layernorm''',
        '''kernel''': '''weight''',
        '''.out''': '''.output''',
        '''scale''': '''weight''',
        '''embedders_0.pos_embedding''': '''row_embedder.weight''',
        '''embedders_1.pos_embedding''': '''column_embedder.weight''',
    }
    # Decoder-only substitutions (attention / layer-norm / lm-head layout).
    lowerCamelCase_ ={
        '''query''': '''attention.query''',
        '''key''': '''attention.key''',
        '''value''': '''attention.value''',
        '''output.dense''': '''output''',
        '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
        '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
        '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
        '''mlp.''': '''mlp.DenseReluDense.''',
        '''pre_mlp_layer_norm''': '''mlp.layer_norm''',
        '''self_attention.o''': '''self_attention.attention.o''',
        '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
        '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
        '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            lowerCamelCase_ ='''.'''.join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                lowerCamelCase_ =new_key.replace(__snake_case , __snake_case )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    lowerCamelCase_ =new_key.replace(__snake_case , __snake_case )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case )
                lowerCamelCase_ =new_key.replace('''encoder''' , '''encoder.encoder''' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case )
            lowerCamelCase_ =flax_dict[key]

    lowerCamelCase_ ={}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # Dense kernels are stored transposed in flax; transpose back.
            lowerCamelCase_ =torch.from_numpy(converted_dict[key].T )
        else:
            lowerCamelCase_ =torch.from_numpy(converted_dict[key] )
    return converted_torch_dict


def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Any=False , __snake_case : Optional[int]=False ) -> Union[str, Any]:
    """Build an HF Pix2Struct model from a T5X checkpoint and save it.

    ``use_large`` selects the large architecture (hidden 1536 / ff 3968 /
    24 heads / 18 layers); ``is_vqa`` toggles the VQA head in the config.
    """
    lowerCamelCase_ =get_flax_param(__snake_case )
    if not use_large:
        lowerCamelCase_ =PixaStructVisionConfig()
        lowerCamelCase_ =PixaStructTextConfig()
    else:
        lowerCamelCase_ =PixaStructVisionConfig(
            hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
        lowerCamelCase_ =PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
    lowerCamelCase_ =PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__snake_case )

    lowerCamelCase_ =PixaStructForConditionalGeneration(__snake_case )

    lowerCamelCase_ =rename_and_convert_flax_params(__snake_case )
    model.load_state_dict(__snake_case )

    lowerCamelCase_ =AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
    lowerCamelCase_ =PixaStructImageProcessor()
    lowerCamelCase_ =PixaStructProcessor(image_processor=__snake_case , tokenizer=__snake_case )

    if use_large:
        # Large checkpoints use longer patch sequences and VQA formatting.
        lowerCamelCase_ =4096
        lowerCamelCase_ =True

    # mkdir if needed
    os.makedirs(__snake_case , exist_ok=__snake_case )

    model.save_pretrained(__snake_case )
    processor.save_pretrained(__snake_case )

    print('''Model saved in {}'''.format(__snake_case ) )


if __name__ == "__main__":
    a_ : Optional[int] = argparse.ArgumentParser()
    parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
    parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
    a_ : Tuple = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
75
1
'''Create and save a randomly initialized seq2seq model from an existing config.

Useful for producing tiny/fresh models for tests: the architecture comes
from a pretrained config, the weights are random.
'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer


def a_ ( config_name : str , save_dir : str , **config_kwargs ) -> int:
    """Instantiate a model from ``config_name`` with random weights and save it.

    The previous version was broken by identifier mangling: all three
    parameters were named ``__snake_case`` (a SyntaxError) and the body
    referenced an unbound ``model``. Repaired dataflow below.

    Args:
        config_name: model id or path whose config (and tokenizer) to reuse.
        save_dir: output directory for the model and tokenizer files.
        **config_kwargs: overrides forwarded to ``AutoConfig.from_pretrained``.

    Returns:
        The freshly initialized model (return type hint kept from the
        original signature).
    """
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeqaSeqLM.from_config(config )
    model.save_pretrained(save_dir )
    # Save the matching tokenizer next to the weights so the directory is
    # self-contained and loadable with from_pretrained.
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model


if __name__ == "__main__":
    # The original entry point referenced an undefined name
    # (`save_randomly_initialized_version`); expose the repaired function.
    fire.Fire(a_)
75
'''CLIP-style image processor: resize, center-crop, rescale, normalize.

NOTE(review): identifiers are machine-mangled -- the base class
(`lowerCamelCase__`, presumably ``BaseImageProcessor``), all method names
(`lowercase__`, which shadow each other in a real class body), parameters
(`lowerCAmelCase`) and locals (`lowerCamelCase_`) must be restored before
running; later lines read the original names (``size``, ``images``, ...).
'''
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


a_ : Union[str, Any] = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class __UpperCamelCase ( lowerCamelCase__ ):
    # Name of the tensor key this processor emits in its BatchFeature.
    lowercase : Tuple =['pixel_values']

    def __init__( self, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = 1 / 255, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, **lowerCAmelCase, ):
        """Store the default preprocessing configuration.

        Defaults: shortest-edge 224 resize, 224x224 center crop, 1/255
        rescale, OpenAI CLIP mean/std normalization, RGB conversion on.
        """
        super().__init__(**lowerCAmelCase )
        lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 224}
        lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
        lowerCamelCase_ =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase, param_name='''crop_size''' )

        lowerCamelCase_ =do_resize
        lowerCamelCase_ =size
        lowerCamelCase_ =resample
        lowerCamelCase_ =do_center_crop
        lowerCamelCase_ =crop_size
        lowerCamelCase_ =do_rescale
        lowerCamelCase_ =rescale_factor
        lowerCamelCase_ =do_normalize
        lowerCamelCase_ =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        lowerCamelCase_ =image_std if image_std is not None else OPENAI_CLIP_STD
        lowerCamelCase_ =do_convert_rgb

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = None, **lowerCAmelCase, ):
        """Resize so the shortest edge matches ``size['shortest_edge']`` (aspect preserved)."""
        lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        lowerCamelCase_ =get_resize_output_image_size(lowerCAmelCase, size=size['''shortest_edge'''], default_to_square=lowerCAmelCase )
        return resize(lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
        """Center-crop to ``(size['height'], size['width'])``."""
        lowerCamelCase_ =get_size_dict(lowerCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(lowerCAmelCase, size=(size['''height'''], size['''width''']), data_format=lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(lowerCAmelCase, scale=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = ChannelDimension.FIRST, **lowerCAmelCase, ):
        """Full preprocessing pipeline; per-call arguments override the stored defaults.

        Order: RGB-convert -> to numpy -> resize -> center-crop -> rescale
        -> normalize -> channel-format -> BatchFeature.
        """
        lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize
        lowerCamelCase_ =size if size is not None else self.size
        lowerCamelCase_ =get_size_dict(lowerCAmelCase, param_name='''size''', default_to_square=lowerCAmelCase )
        lowerCamelCase_ =resample if resample is not None else self.resample
        lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop
        lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size
        lowerCamelCase_ =get_size_dict(lowerCAmelCase, param_name='''crop_size''', default_to_square=lowerCAmelCase )
        lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale
        lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize
        lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean
        lowerCamelCase_ =image_std if image_std is not None else self.image_std
        lowerCamelCase_ =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        lowerCamelCase_ =make_list_of_images(lowerCAmelCase )

        if not valid_images(lowerCAmelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )

        # Each enabled step requires its configuration to be present.
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )

        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            lowerCamelCase_ =[convert_to_rgb(lowerCAmelCase ) for image in images]

        # All transformations expect numpy arrays.
        lowerCamelCase_ =[to_numpy_array(lowerCAmelCase ) for image in images]

        if do_resize:
            lowerCamelCase_ =[self.resize(image=lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase ) for image in images]

        if do_center_crop:
            lowerCamelCase_ =[self.center_crop(image=lowerCAmelCase, size=lowerCAmelCase ) for image in images]

        if do_rescale:
            lowerCamelCase_ =[self.rescale(image=lowerCAmelCase, scale=lowerCAmelCase ) for image in images]

        if do_normalize:
            lowerCamelCase_ =[self.normalize(image=lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase ) for image in images]

        lowerCamelCase_ =[to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]

        lowerCamelCase_ ={'''pixel_values''': images}
        return BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase )
75
1
'''Utilities for downloading and caching "dynamic" (community) pipeline modules.

Fetches a community pipeline file from GitHub or the Hub, verifies its
imports, copies it (plus its relative imports) into the HF dynamic-modules
cache, and loads the pipeline class from it.

NOTE(review): identifiers are machine-mangled -- all functions are named
``a_`` (shadowing each other), locals collapse onto ``lowerCamelCase_``
while later lines read the real names (``init_path``, ``submodule_path``,
``revision``, ...), and several signatures repeat ``__snake_case`` (a
SyntaxError). Restore the original names before running; comments describe
the intended behavior.
'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


# URL template for community pipeline files on GitHub (COMMUNITY_PIPELINES_URL).
a_ : List[Any] = (
    """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)

a_ : Union[str, Any] = logging.get_logger(__name__)  # pylint: disable=invalid-name


def a_ ( ) -> List[str]:
    """Return all released diffusers versions from PyPI, sorted ascending."""
    lowerCamelCase_ ='''https://pypi.org/pypi/diffusers/json'''
    lowerCamelCase_ =json.loads(request.urlopen(__snake_case ).read() )['''releases'''].keys()
    return sorted(__snake_case , key=lambda __snake_case : version.Version(__snake_case ) )


def a_ ( ) -> str:
    """Create the HF modules cache and put it on sys.path (idempotent)."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(__snake_case )
    os.makedirs(__snake_case , exist_ok=__snake_case )
    lowerCamelCase_ =Path(__snake_case ) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()


def a_ ( __snake_case : Union[str, os.PathLike] ) -> List[str]:
    """Create a named package (with __init__.py) inside the modules cache."""
    init_hf_modules()
    lowerCamelCase_ =Path(__snake_case ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(__snake_case , exist_ok=__snake_case )
    lowerCamelCase_ =dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()


def a_ ( __snake_case : Tuple ) -> List[str]:
    """Return the names relatively imported by a Python file (deduplicated)."""
    with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f:
        lowerCamelCase_ =f.read()

    # Imports of the form `import .xxx`
    # NOTE(review): patterns are not raw strings; escape sequences like \s
    # rely on Python passing unknown escapes through.
    lowerCamelCase_ =re.findall('''^\s*import\s+\.(\S+)\s*$''' , __snake_case , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __snake_case , flags=re.MULTILINE )
    # Unique-ify
    return list(set(__snake_case ) )


def a_ ( __snake_case : str ) -> str:
    """Transitively collect every file relatively imported from ``module_file``."""
    lowerCamelCase_ =False
    lowerCamelCase_ =[module_file]
    lowerCamelCase_ =[]

    # Let's recurse through all relative imports
    while not no_change:
        lowerCamelCase_ =[]
        for f in files_to_check:
            new_imports.extend(get_relative_imports(__snake_case ) )

        lowerCamelCase_ =Path(__snake_case ).parent
        lowerCamelCase_ =[str(module_path / m ) for m in new_imports]
        lowerCamelCase_ =[f for f in new_import_files if f not in all_relative_imports]
        lowerCamelCase_ =[F'''{f}.py''' for f in new_import_files]

        # Fixed point: stop when no new files were discovered this round.
        lowerCamelCase_ =len(__snake_case ) == 0
        all_relative_imports.extend(__snake_case )

    return all_relative_imports


def a_ ( __snake_case : Union[str, Any] ) -> Optional[int]:
    """Verify all top-level imports of a file are installed; return its relative imports.

    Raises ImportError listing the missing packages otherwise.
    """
    with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f:
        lowerCamelCase_ =f.read()

    # Imports of the form `import xxx`
    lowerCamelCase_ =re.findall('''^\s*import\s+(\S+)\s*$''' , __snake_case , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __snake_case , flags=re.MULTILINE )
    # Only keep the top-level module
    lowerCamelCase_ =[imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]

    # Unique-ify and test we got them all
    lowerCamelCase_ =list(set(__snake_case ) )
    lowerCamelCase_ =[]
    for imp in imports:
        try:
            importlib.import_module(__snake_case )
        except ImportError:
            missing_packages.append(__snake_case )

    if len(__snake_case ) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            F'''{', '.join(__snake_case )}. Run `pip install {' '.join(__snake_case )}`''' )

    return get_relative_imports(__snake_case )


def a_ ( __snake_case : Tuple , __snake_case : Tuple ) -> List[Any]:
    """Import a cached module and return ``class_name`` from it.

    When ``class_name`` is None, auto-discover the single pipeline class.
    """
    lowerCamelCase_ =module_path.replace(os.path.sep , '''.''' )
    lowerCamelCase_ =importlib.import_module(__snake_case )

    if class_name is None:
        return find_pipeline_class(__snake_case )

    return getattr(__snake_case , __snake_case )


def a_ ( __snake_case : Dict ) -> Any:
    """Return the unique DiffusionPipeline subclass defined in a loaded module.

    Classes coming from the ``diffusers`` package itself are ignored;
    raises ValueError when more than one candidate is defined.
    """
    from ..pipelines import DiffusionPipeline

    lowerCamelCase_ =dict(inspect.getmembers(__snake_case , inspect.isclass ) )

    lowerCamelCase_ =None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , __snake_case )
            and cls.__module__.split('''.''' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
                    F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
                    F''' {loaded_module}.''' )
            lowerCamelCase_ =cls

    return pipeline_class


def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : str , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , ) -> Optional[int]:
    """Download (or locate) a module file and install it in the dynamic-modules cache.

    Resolution order: local file -> community pipeline on GitHub (name with
    no '/') -> repo file on the Hub. Returns the cached module path.
    """
    lowerCamelCase_ =str(__snake_case )

    lowerCamelCase_ =os.path.join(__snake_case , __snake_case )

    if os.path.isfile(__snake_case ):
        lowerCamelCase_ =module_file_or_url
        lowerCamelCase_ ='''local'''
    elif pretrained_model_name_or_path.count('''/''' ) == 0:
        lowerCamelCase_ =get_diffusers_versions()
        # cut ".dev0"
        lowerCamelCase_ ='''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
        # retrieve github version that matches
        if revision is None:
            lowerCamelCase_ =latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(F'''Defaulting to latest_version: {revision}.''' )
        elif revision in available_versions:
            lowerCamelCase_ =F'''v{revision}'''
        elif revision == "main":
            lowerCamelCase_ =revision
        else:
            raise ValueError(
                F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
                F''' {', '.join(available_versions + ['main'] )}.''' )

        # community pipeline on GitHub
        lowerCamelCase_ =COMMUNITY_PIPELINES_URL.format(revision=__snake_case , pipeline=__snake_case )
        try:
            lowerCamelCase_ =cached_download(
                __snake_case , cache_dir=__snake_case , force_download=__snake_case , proxies=__snake_case , resume_download=__snake_case , local_files_only=__snake_case , use_auth_token=__snake_case , )
            lowerCamelCase_ ='''git'''
            lowerCamelCase_ =pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            lowerCamelCase_ =hf_hub_download(
                __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , proxies=__snake_case , resume_download=__snake_case , local_files_only=__snake_case , use_auth_token=__snake_case , )
            lowerCamelCase_ =os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
            raise

    # Check we have all the requirements in our environment
    lowerCamelCase_ =check_imports(__snake_case )

    # Now we move the module inside our cached dynamic modules.
    lowerCamelCase_ =DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(__snake_case )
    lowerCamelCase_ =Path(__snake_case ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(__snake_case , submodule_path / module_file )
        for module_needed in modules_needed:
            lowerCamelCase_ =F'''{module_needed}.py'''
            shutil.copy(os.path.join(__snake_case , __snake_case ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(__snake_case , __snake_case ):
            lowerCamelCase_ =use_auth_token
        elif use_auth_token is True:
            lowerCamelCase_ =HfFolder.get_token()
        else:
            lowerCamelCase_ =None

        lowerCamelCase_ =model_info(__snake_case , revision=__snake_case , token=__snake_case ).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        lowerCamelCase_ =submodule_path / commit_hash
        lowerCamelCase_ =full_submodule + os.path.sep + commit_hash
        create_dynamic_module(__snake_case )

        if not (submodule_path / module_file).exists():
            shutil.copy(__snake_case , submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    __snake_case , F'''{module_needed}.py''' , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , )
    return os.path.join(__snake_case , __snake_case )


def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : str , __snake_case : Optional[str] = None , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : Optional[int] , ) -> Optional[int]:
    """Cache a dynamic module and return the requested class from it."""
    lowerCamelCase_ =get_cached_module_file(
        __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , )
    return get_class_in_module(__snake_case , final_module.replace('''.py''' , '''''' ) )
75
'''Break a Caesar cipher with a chi-squared goodness-of-fit test.'''
from __future__ import annotations


def a_ (
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Decrypt a Caesar cipher by trying every shift and scoring with chi-squared.

    For each candidate shift the ciphertext is decrypted and compared to the
    expected English letter frequencies; the shift with the smallest
    chi-squared statistic is taken as the answer.

    The previous version was broken by identifier mangling: all four
    parameters shared the name ``__snake_case`` (a SyntaxError) and the body
    read names (``alphabet_letters``, ``frequencies`` ...) that were never
    bound. The two duplicated chi-squared branches (case-sensitive /
    insensitive) are also merged: in the insensitive path the text is
    already lowercased, so counting on the lowercased decryption is
    equivalent in both modes.

    Args:
        ciphertext: the encrypted text to crack.
        cipher_alphabet: alphabet used by the cipher (defaults to a-z).
        frequencies_dict: expected letter frequencies (defaults to English).
        case_sensitive: preserve the case of the input while decrypting.

    Returns:
        ``(shift, chi_squared_value, decrypted_text)``.

    >>> a_('dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!')[0]
    7
    """
    alphabet_letters = cipher_alphabet or [chr(code) for code in range(97, 123)]

    if not frequencies_dict:
        # Relative frequencies of letters in English text.
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # shift -> (chi-squared statistic, decrypted text)
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        for letter in ciphertext:
            try:
                # Shift the letter backwards through the alphabet (mod its size).
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Characters outside the alphabet pass through unchanged.
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0
        lowered = decrypted_with_shift.lower()

        for letter in decrypted_with_shift:
            key = letter.lower()
            if key in frequencies:
                occurrences = lowered.count(key)
                expected = frequencies[key] * occurrences
                # Chi-squared contribution for this letter.
                chi_squared_statistic += (occurrences - expected) ** 2 / expected

        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # The most likely shift minimizes the chi-squared statistic (ties broken
    # by the tuple comparison, matching the original min over (chi, text)).
    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=lambda candidate: chi_squared_statistic_values[candidate],
    )
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
75
1
'''simple docstring''' import copy import os import cva import numpy as np from matplotlib import pyplot as plt class __UpperCamelCase : def __init__( self ): """simple docstring""" lowerCamelCase_ ='''''' lowerCamelCase_ ='''''' lowerCamelCase_ =[] lowerCamelCase_ =0 lowerCamelCase_ =256 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =cva.imread(lowerCAmelCase, 0 ) lowerCamelCase_ =copy.deepcopy(self.img ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =plt.hist(self.img.ravel(), 256, [0, 256], label='''x''' ) lowerCamelCase_ =np.sum(lowerCAmelCase ) for i in range(len(lowerCAmelCase ) ): lowerCamelCase_ =x[i] / self.k self.sk += prk lowerCamelCase_ =(self.L - 1) * self.sk if self.rem != 0: lowerCamelCase_ =int(last % last ) lowerCamelCase_ =int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(lowerCAmelCase ) lowerCamelCase_ =int(np.ma.count(self.img ) / self.img[1].size ) lowerCamelCase_ =self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCamelCase_ =self.img[j][i] if num != self.last_list[num]: lowerCamelCase_ =self.last_list[num] cva.imwrite('''output_data/output.jpg''', self.img ) def lowercase__ ( self ): """simple docstring""" plt.hist(self.img.ravel(), 256, [0, 256] ) def lowercase__ ( self ): """simple docstring""" cva.imshow('''Output-Image''', self.img ) cva.imshow('''Input-Image''', self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": a_ : str = os.path.join(os.path.basename(__file__), """image_data/input.jpg""") a_ : Optional[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
75
"""Utilities to download, cache and dynamically import community pipeline
modules (from the Hub or from the diffusers GitHub `examples/community`)."""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """Return every `diffusers` release published on PyPI, oldest first."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda r: version.Version(r))


def init_hf_modules():
    """Create the HF modules cache and make it importable (idempotent)."""
    # This function has already been executed if HF_MODULES_CACHE already is in
    # the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create the sub-package *name* inside the dynamic modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Return the modules imported relatively by *module_file*."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Transitively collect every file pulled in by *module_file*'s relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Raise if any top-level import of *filename* is missing from the
    environment; return the file's relative imports."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import *module_path* from the cache and return *class_name* from it
    (or the unique pipeline class when *class_name* is None)."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the single DiffusionPipeline subclass defined in *loaded_module*."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Locate *module_file* for *pretrained_model_name_or_path* (local path,
    community pipeline name, or Hub repo), copy it into the dynamic-modules
    cache, and return its path relative to that cache."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was
        # a change, and give them the name of that hash, to only copy when there
        # is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git
        # hash of the repo. This way we get the benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Download/cache *module_file* and return *class_name* from it."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
75
1
"""Introsort: quicksort with median-of-3 pivot selection, falling back to
heapsort past a depth limit and to insertion sort on small slices."""
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort ``array[start:end]`` in place with insertion sort; return the array."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements right until the insertion point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift ``array[index]`` down so the subtree rooted there is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    """Sort *array* in place with heapsort; return the array."""
    n = len(array)

    # Build the initial max-heap.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    # Repeatedly move the max to the end and restore the heap.
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Return the median of the three candidate pivot values."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of ``array[low:high]`` around *pivot*;
    return the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """Sort *array* with introsort and return it.

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    >>> sort([])
    []
    """
    if len(array) == 0:
        return array
    # Depth limit after which quicksort hands over to heapsort.
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Recursive introsort worker over ``array[start:end]``."""
    while end - start > size_threshold:
        if max_depth == 0:
            # Too deep: quicksort is degenerating, switch to heapsort.
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    # Small slice: insertion sort finishes the job.
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
75
'''simple docstring''' a_ : Any = [ 9_99, 8_00, 7_99, 6_00, 5_99, 5_00, 4_00, 3_99, 3_77, 3_55, 3_33, 3_11, 2_88, 2_66, 2_44, 2_22, 2_00, 1_99, 1_77, 1_55, 1_33, 1_11, 88, 66, 44, 22, 0, ] a_ : Any = [ 9_99, 9_76, 9_52, 9_28, 9_05, 8_82, 8_58, 8_57, 8_10, 7_62, 7_15, 7_14, 5_72, 4_29, 4_28, 2_86, 2_85, 2_38, 1_90, 1_43, 1_42, 1_18, 95, 71, 47, 24, 0, ] a_ : Optional[Any] = [ 9_99, 9_88, 9_77, 9_66, 9_55, 9_44, 9_33, 9_22, 9_11, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_50, 3_00, 2_99, 2_66, 2_33, 2_00, 1_99, 1_79, 1_59, 1_40, 1_20, 1_00, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] a_ : str = [ 9_99, 9_95, 9_92, 9_89, 9_85, 9_81, 9_78, 9_75, 9_71, 9_67, 9_64, 9_61, 9_57, 9_56, 9_51, 9_47, 9_42, 9_37, 9_33, 9_28, 9_23, 9_19, 9_14, 9_13, 9_08, 9_03, 8_97, 8_92, 8_87, 8_81, 8_76, 8_71, 8_70, 8_64, 8_58, 8_52, 8_46, 8_40, 8_34, 8_28, 8_27, 8_20, 8_13, 8_06, 7_99, 7_92, 7_85, 7_84, 7_77, 7_70, 7_63, 7_56, 7_49, 7_42, 7_41, 7_33, 7_24, 7_16, 7_07, 6_99, 6_98, 6_88, 6_77, 6_66, 6_56, 6_55, 6_45, 6_34, 6_23, 6_13, 6_12, 5_98, 5_84, 5_70, 5_69, 5_55, 5_41, 5_27, 5_26, 5_05, 4_84, 4_83, 4_62, 4_40, 4_39, 3_96, 3_95, 3_52, 3_51, 3_08, 3_07, 2_64, 2_63, 2_20, 2_19, 1_76, 1_32, 88, 44, 0, ] a_ : Optional[int] = [ 9_99, 9_97, 9_95, 9_92, 9_90, 9_88, 9_86, 9_84, 9_81, 9_79, 9_77, 9_75, 9_72, 9_70, 9_68, 9_66, 9_64, 9_61, 9_59, 9_57, 9_56, 9_54, 9_51, 9_49, 9_46, 9_44, 9_41, 9_39, 9_36, 9_34, 9_31, 9_29, 9_26, 9_24, 9_21, 9_19, 9_16, 9_14, 9_13, 9_10, 9_07, 9_05, 9_02, 8_99, 8_96, 8_93, 8_91, 8_88, 8_85, 8_82, 8_79, 8_77, 8_74, 8_71, 8_70, 8_67, 8_64, 8_61, 8_58, 8_55, 8_52, 8_49, 8_46, 8_43, 8_40, 8_37, 8_34, 8_31, 8_28, 8_27, 8_24, 8_21, 8_17, 8_14, 8_11, 8_08, 8_04, 8_01, 7_98, 7_95, 7_91, 7_88, 7_85, 7_84, 7_80, 7_77, 7_74, 7_70, 7_66, 7_63, 7_60, 7_56, 7_52, 7_49, 7_46, 7_42, 7_41, 7_37, 7_33, 7_30, 7_26, 7_22, 7_18, 7_14, 7_10, 7_07, 7_03, 6_99, 6_98, 6_94, 6_90, 6_85, 6_81, 6_77, 6_73, 6_69, 6_64, 6_60, 
6_56, 6_55, 6_50, 6_46, 6_41, 6_36, 6_32, 6_27, 6_22, 6_18, 6_13, 6_12, 6_07, 6_02, 5_96, 5_91, 5_86, 5_80, 5_75, 5_70, 5_69, 5_63, 5_57, 5_51, 5_45, 5_39, 5_33, 5_27, 5_26, 5_19, 5_12, 5_05, 4_98, 4_91, 4_84, 4_83, 4_74, 4_66, 4_57, 4_49, 4_40, 4_39, 4_28, 4_18, 4_07, 3_96, 3_95, 3_81, 3_66, 3_52, 3_51, 3_30, 3_08, 3_07, 2_86, 2_64, 2_63, 2_42, 2_20, 2_19, 1_76, 1_75, 1_32, 1_31, 88, 44, 0, ] a_ : Dict = [ 9_99, 9_91, 9_82, 9_74, 9_66, 9_58, 9_50, 9_41, 9_33, 9_25, 9_16, 9_08, 9_00, 8_99, 8_74, 8_50, 8_25, 8_00, 7_99, 7_00, 6_00, 5_00, 4_00, 3_00, 2_00, 1_00, 0, ] a_ : Tuple = [ 9_99, 9_92, 9_85, 9_78, 9_71, 9_64, 9_57, 9_49, 9_42, 9_35, 9_28, 9_21, 9_14, 9_07, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_00, 2_99, 2_00, 1_99, 1_00, 99, 0, ] a_ : Any = [ 9_99, 9_96, 9_92, 9_89, 9_85, 9_82, 9_79, 9_75, 9_72, 9_68, 9_65, 9_61, 9_58, 9_55, 9_51, 9_48, 9_44, 9_41, 9_38, 9_34, 9_31, 9_27, 9_24, 9_20, 9_17, 9_14, 9_10, 9_07, 9_03, 9_00, 8_99, 8_91, 8_84, 8_76, 8_69, 8_61, 8_53, 8_46, 8_38, 8_30, 8_23, 8_15, 8_08, 8_00, 7_99, 7_88, 7_77, 7_66, 7_55, 7_44, 7_33, 7_22, 7_11, 7_00, 6_99, 6_88, 6_77, 6_66, 6_55, 6_44, 6_33, 6_22, 6_11, 6_00, 5_99, 5_85, 5_71, 5_57, 5_42, 5_28, 5_14, 5_00, 4_99, 4_85, 4_71, 4_57, 4_42, 4_28, 4_14, 4_00, 3_99, 3_79, 3_59, 3_40, 3_20, 3_00, 2_99, 2_79, 2_59, 2_40, 2_20, 2_00, 1_99, 1_66, 1_33, 1_00, 99, 66, 33, 0, ]
75
1
"""Tests for the ScoreSdeVe (variance-exploding SDE) diffusion pipeline."""
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        """A tiny deterministic UNet model used by the fast test."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        """Two-step inference: output shape and a fixed slice must match, and
        the tuple-return path must agree with the dict-return path."""
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        """Slow end-to-end check against the pretrained ncsnpp-church model."""
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
75
"""Lazily-initialised public API for the Funnel Transformer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base structure: config, conversion script and slow tokenizer are always importable.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Fast tokenizer only when `tokenizers` is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

# PyTorch models only when `torch` is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

# TensorFlow models only when `tensorflow` is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
1
'''simple docstring''' from math import factorial, pi def a_ ( __snake_case : float , __snake_case : int = 30 ) -> float: """simple docstring""" if not isinstance(__snake_case , (int, float) ): raise ValueError('''maclaurin_sin() requires either an int or float for theta''' ) if not isinstance(__snake_case , __snake_case ) or accuracy <= 0: raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' ) lowerCamelCase_ =float(__snake_case ) lowerCamelCase_ =theta // (2 * pi) theta -= 2 * div * pi return sum( (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__snake_case ) ) def a_ ( __snake_case : float , __snake_case : int = 30 ) -> float: """simple docstring""" if not isinstance(__snake_case , (int, float) ): raise ValueError('''maclaurin_cos() requires either an int or float for theta''' ) if not isinstance(__snake_case , __snake_case ) or accuracy <= 0: raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' ) lowerCamelCase_ =float(__snake_case ) lowerCamelCase_ =theta // (2 * pi) theta -= 2 * div * pi return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() print(maclaurin_sin(10)) print(maclaurin_sin(-10)) print(maclaurin_sin(10, 15)) print(maclaurin_sin(-10, 15)) print(maclaurin_cos(5)) print(maclaurin_cos(-5)) print(maclaurin_cos(10, 15)) print(maclaurin_cos(-10, 15))
75
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ ={ '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } lowerCamelCase_ ={ '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16_000, '''return_attention_mask''': False, '''do_normalize''': True, } lowerCamelCase_ =tempfile.mkdtemp() lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase_ =os.path.join(self.tmpdirname, lowerCAmelCase ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '''\n''' ) with open(self.feature_extraction_file, '''w''', 
encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '''\n''' ) # load decoder from hub lowerCamelCase_ ='''hf-internal-testing/ngram-beam-search-decoder''' def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.add_kwargs_tokens_map.copy() kwargs.update(lowerCAmelCase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowerCAmelCase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor, lowerCAmelCase ) # decoder self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, ) self.assertIsInstance(processor.decoder, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), 
decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha, 5.0 ) self.assertEqual(processor.language_model.beta, 3.0 ) self.assertEqual(processor.language_model.score_boundary, -7.0 ) self.assertEqual(processor.language_model.unk_score_offset, 3 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(lowerCAmelCase, '''include''' ): WavaVecaProcessorWithLM( tokenizer=lowerCAmelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =floats_list((3, 1_000) ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ) lowerCamelCase_ =processor(lowerCAmelCase, return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ ='''This is a test string''' lowerCamelCase_ =processor(text=lowerCAmelCase ) lowerCamelCase_ =tokenizer(lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], 
encoded_processor[key] ) def lowercase__ ( self, lowerCAmelCase=(2, 10, 16), lowerCAmelCase=77 ): """simple docstring""" np.random.seed(lowerCAmelCase ) return np.random.rand(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits(shape=(10, 16), seed=13 ) lowerCamelCase_ =processor.decode(lowerCAmelCase ) lowerCamelCase_ =decoder.decode_beams(lowerCAmelCase )[0] self.assertEqual(decoded_decoder[0], decoded_processor.text ) self.assertEqual('''</s> <s> </s>''', decoded_processor.text ) self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: lowerCamelCase_ =processor.batch_decode(lowerCAmelCase ) else: with get_context(lowerCAmelCase ).Pool() as pool: lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =list(lowerCAmelCase ) with get_context('''fork''' ).Pool() as p: lowerCamelCase_ =decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =[], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(lowerCAmelCase, decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text ) self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score ) self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =15 lowerCamelCase_ =-2_0.0 lowerCamelCase_ =-4.0 lowerCamelCase_ =processor.batch_decode( lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, ) lowerCamelCase_ =decoded_processor_out.text lowerCamelCase_ =list(lowerCAmelCase ) with get_context('''fork''' ).Pool() as pool: lowerCamelCase_ =decoder.decode_beams_batch( lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, ) lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out] lowerCamelCase_ =[d[0][2] for d in decoded_decoder_out] lowerCamelCase_ =[d[0][3] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], 
lowerCAmelCase ) self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) ) self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =2.0 lowerCamelCase_ =5.0 lowerCamelCase_ =-2_0.0 lowerCamelCase_ =True lowerCamelCase_ =processor.batch_decode( lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, ) lowerCamelCase_ =decoded_processor_out.text lowerCamelCase_ =list(lowerCAmelCase ) decoder.reset_params( alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, ) with get_context('''fork''' ).Pool() as pool: lowerCamelCase_ =decoder.decode_beams_batch( lowerCAmelCase, lowerCAmelCase, ) lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase ) lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha, 2.0 ) self.assertEqual(lm_model.beta, 5.0 ) self.assertEqual(lm_model.unk_score_offset, -2_0.0 ) self.assertEqual(lm_model.score_boundary, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ 
=processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =snapshot_download('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =os.listdir(lowerCAmelCase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =floats_list((3, 1_000) ) lowerCamelCase_ =processor_wavaveca(lowerCAmelCase, return_tensors='''np''' ) lowerCamelCase_ =processor_auto(lowerCAmelCase, return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =processor_wavaveca.batch_decode(lowerCAmelCase ) lowerCamelCase_ =processor_auto.batch_decode(lowerCAmelCase ) 
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', ) @staticmethod def lowercase__ ( lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[d[key] for d in offsets] return retrieved_list def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =self._get_dummy_logits()[0] lowerCamelCase_ =processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word 
self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowercase__ ( self ): """simple docstring""" import torch lowerCamelCase_ =load_dataset('''common_voice''', '''en''', split='''train''', streaming=lowerCAmelCase ) lowerCamelCase_ =ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) ) lowerCamelCase_ =iter(lowerCAmelCase ) lowerCamelCase_ =next(lowerCAmelCase ) lowerCamelCase_ =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) lowerCamelCase_ =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train lowerCamelCase_ =processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase ).logits.cpu().numpy() lowerCamelCase_ =processor.decode(logits[0], output_word_offsets=lowerCAmelCase ) lowerCamelCase_ =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate lowerCamelCase_ =[ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] lowerCamelCase_ ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS 
TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase ) self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text ) # output times lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) ) lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) ) # fmt: off lowerCamelCase_ =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) lowerCamelCase_ =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) ) self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
75
1
"""Convert MobileViTV2 checkpoints (from the ml-cvnets library) to HuggingFace format.

The original code defined every helper as `a_` (each definition shadowing the
previous one) while call sites used the real names, which made the script raise
NameError at runtime.  Functions and module globals have been restored to the
names the call sites expect, and the transformers class names corrected to the
ones that actually exist (`MobileViTV2Config`, ...).
"""

import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    """Load the original ml-cvnets YAML config and expose it as a flat Namespace.

    Nested YAML keys are flattened with "." separators (e.g.
    ``model.classification.name``) so callers can read them via ``getattr``.
    """
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into dotted keys.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config


def get_mobilevitv2_config(task_name, orig_cfg_file):
    """Build a :class:`MobileViTV2Config` for the given task from the original config.

    Args:
        task_name: one of the supported task identifiers
            (``imagenet1k_*``, ``imagenet21k_to_1k_*``, ``ade20k_*``, ``voc_*``).
        orig_cfg_file: path to the original ml-cvnets YAML config.

    Returns:
        A populated ``MobileViTV2Config`` (including id2label/label2id maps
        fetched from the ``huggingface/label-files`` dataset repo).
    """
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset-dependent settings
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # settings read from the original config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label / label2id maps
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict, base_model=False):
    """Map original ml-cvnets state-dict keys to HuggingFace MobileViTV2 keys.

    Args:
        state_dict: the original checkpoint's state dict.
        base_model: when True, keys are produced without the ``mobilevitv2.``
            prefix (bare backbone).

    Returns:
        List of ``(old_key, new_key)`` tuples to feed to :func:`rename_key`.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            # number of transformer layers per stage differs by stage
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            # NOTE: `j` deliberately leaks from the loop above; `j + 1` is the
            # final global_rep entry of the stage, which maps to the layernorm.
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    """Drop keys not used by the HF model (the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    """Download the standard COCO cats test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert one MobileViTV2 checkpoint and save model + image processor.

    Args:
        task_name: task identifier (see ``--task`` choices below).
        checkpoint_path: path to the original ``.pt`` state dict.
        orig_config_path: path to the original ml-cvnets YAML config.
        pytorch_dump_folder_path: output directory for the HF model.
    """
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model's state dict
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant (reference values from the original run)
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            """
                Classification (ImageNet-1k)
                    - MobileViTV2 (256x256) : imagenet1k_256
                    - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                    - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256
                    - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                      ImageNet-1k 384x384) : imagenet21k_to_1k_384

                Segmentation
                    - ADE20K Dataset : ade20k_deeplabv3
                    - Pascal VOC 2012 Dataset: voc_deeplabv3
            """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
75
"""Tests for the Stable Diffusion InstructPix2Pix pipeline.

The original (machine-mangled) module defined both test classes as
``__UpperCamelCase`` (the second shadowing the first) and every test method as
``lowercase__`` (each def shadowing the previous one), so only a single test per
class could ever run; it also referenced undefined names (``lowerCamelCase__``,
``lowerCAmelCase``) and non-existent library classes.  Names are restored so
unittest discovery works again; all numeric expectations are kept exactly.
"""

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests with tiny randomly-initialized components."""

    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny deterministic pipeline components (seeded at 0)."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,  # pix2pix concatenates image latents to noise latents
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return seeded call kwargs with a 32x32 random PIL input image."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        """Default call produces the expected image slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        """A negative prompt shifts the output to the expected slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        """Batched prompts + a repeated tensor image produce the expected slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        # convert the PIL image to a [0, 1] tensor batch of size 2
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        """Euler ancestral scheduler produces the expected slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        # print the rounded slice to ease updating the expectation when needed
        rounded = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        """Batched and single inference must match within tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        """Passing pre-encoded latents must match passing the image directly."""
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests against the real timbrooks/instruct-pix2pix checkpoint."""

    def tearDown(self):
        # free GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        """Return seeded call kwargs with the reference test image."""
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        """Default PNDM run matches the reference slice."""
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        """LMS scheduler run matches the reference slice."""
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        """DDIM scheduler run matches the reference slice."""
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        """The step callback fires each step with the expected latents."""
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        """Sequential CPU offload keeps peak GPU memory under 2.2 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        """Resolutions divisible by 8 (but not 16/32) still work end-to-end."""
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
75
1
'''Head-importance analysis and head pruning for a GPT-2 style causal LM.

Computes per-head entropy and gradient-based importance scores over a
dataset, optionally masks the least important heads until the LM score
drops below a threshold, then actually prunes the masked heads.

NOTE(review): identifier names in this file appear machine-obfuscated
(every function is named ``a_``, every local is ``lowerCamelCase_``,
every parameter is ``__snake_case``) while the bodies read names such as
``loss`` / ``outputs`` / ``args`` / ``model`` that the visible
assignments never bind. Confirm against the upstream original
(a bertology-style head-pruning script) before executing.
'''
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPTaLMHeadModel


# Module-level logger (annotation corrected: this is a Logger, not a Dict).
a_ : logging.Logger = logging.getLogger(__name__)


def a_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> Any:
    """Save the model into the target directory.

    Removes any stale ``config.json`` / ``pytorch_model.bin`` already in
    the directory first, or creates the directory if it does not exist,
    then delegates to ``model.save_pretrained``.
    """
    # save results
    if os.path.exists(__snake_case ):
        if os.path.exists(os.path.join(__snake_case , '''config.json''' ) ) and os.path.isfile(
            os.path.join(__snake_case , '''config.json''' ) ):
            os.remove(os.path.join(__snake_case , '''config.json''' ) )
        if os.path.exists(os.path.join(__snake_case , '''pytorch_model.bin''' ) ) and os.path.isfile(
            os.path.join(__snake_case , '''pytorch_model.bin''' ) ):
            os.remove(os.path.join(__snake_case , '''pytorch_model.bin''' ) )
    else:
        os.makedirs(__snake_case )
    model.save_pretrained(__snake_case )


def a_ ( __snake_case : Union[str, Any] , __snake_case : Dict=False ) -> int:
    """Return the entropy of a (possibly unnormalized) distribution.

    When ``unlogit`` is true the input is squared first; the result is
    ``-sum(p * log(p))`` along the last dimension.
    """
    lowerCamelCase_ =2
    if unlogit:
        lowerCamelCase_ =torch.pow(__snake_case , __snake_case )
    lowerCamelCase_ =p * torch.log(__snake_case )
    lowerCamelCase_ =0
    return -plogp.sum(dim=-1 )


def a_ ( __snake_case : Optional[int] ) -> Union[str, Any]:
    """Log a 2D tensor as a tab-separated table (one row per layer).

    Integer tensors are printed with ``%d`` formatting, floats with 5
    decimal places.
    """
    logger.info('''lv, h >\t''' + '''\t'''.join(F'''{x + 1}''' for x in range(len(__snake_case ) ) ) )
    for row in range(len(__snake_case ) ):
        if tensor.dtype != torch.long:
            logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )


def a_ ( __snake_case : str , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Any=True , __snake_case : Union[str, Any]=True , __snake_case : int=None , __snake_case : Tuple=False ) -> int:
    """Run the model over the dataloader and accumulate head statistics.

    Returns ``(attn_entropy, head_importance, total_loss)`` where
    importance is the accumulated absolute gradient of the head mask
    (optionally normalized per layer and globally).
    """
    lowerCamelCase_, lowerCamelCase_ =model.config.num_hidden_layers, model.config.num_attention_heads
    lowerCamelCase_ =torch.zeros(__snake_case , __snake_case ).to(args.device )
    lowerCamelCase_ =torch.zeros(__snake_case , __snake_case ).to(args.device )
    if head_mask is None:
        lowerCamelCase_ =torch.ones(__snake_case , __snake_case ).to(args.device )
    head_mask.requires_grad_(requires_grad=__snake_case )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        lowerCamelCase_ =None
    lowerCamelCase_ =0.0
    lowerCamelCase_ =0.0
    for step, inputs in enumerate(tqdm(__snake_case , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
        lowerCamelCase_ =tuple(t.to(args.device ) for t in inputs )
        ((lowerCamelCase_), ) =inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        lowerCamelCase_ =model(__snake_case , labels=__snake_case , head_mask=__snake_case )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =(
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(__snake_case ):
                lowerCamelCase_ =entropy(attn.detach() , __snake_case )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(__snake_case ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        lowerCamelCase_ =2
        lowerCamelCase_ =torch.pow(torch.pow(__snake_case , __snake_case ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
    if not args.dont_normalize_global_importance:
        # Min-max scale all importance scores into [0, 1]
        lowerCamelCase_ =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''' )
        print_ad_tensor(__snake_case )
    if compute_importance:
        logger.info('''Head importance scores''' )
        print_ad_tensor(__snake_case )
        logger.info('''Head ranked by importance scores''' )
        lowerCamelCase_ =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
        lowerCamelCase_ =torch.arange(
            head_importance.numel() , device=args.device )
        lowerCamelCase_ =head_ranks.view_as(__snake_case )
        print_ad_tensor(__snake_case )
    return attn_entropy, head_importance, total_loss


def a_ ( __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Any ) -> List[str]:
    """Iteratively mask the least important heads.

    Masks ``masking_amount`` of the heads per step until the LM score
    (1 / loss) falls below ``masking_threshold * original_score``;
    saves and returns the final head mask.
    """
    lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =compute_heads_importance(__snake_case , __snake_case , __snake_case , compute_entropy=__snake_case )
    lowerCamelCase_ =1 / loss  # instead of downsteam score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''' , __snake_case , original_score * args.masking_threshold )
    lowerCamelCase_ =torch.ones_like(__snake_case )
    lowerCamelCase_ =max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    lowerCamelCase_ =original_score
    while current_score >= original_score * args.masking_threshold:
        lowerCamelCase_ =new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        lowerCamelCase_ =float('''Inf''' )
        lowerCamelCase_ =head_importance.view(-1 ).sort()[1]
        if len(__snake_case ) <= num_to_mask:
            print('''BREAK BY num_to_mask''' )
            break
        # mask heads
        lowerCamelCase_ =current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
        lowerCamelCase_ =new_head_mask.view(-1 )
        lowerCamelCase_ =0.0
        lowerCamelCase_ =new_head_mask.view_as(__snake_case )
        lowerCamelCase_ =new_head_mask.clone().detach()
        print_ad_tensor(__snake_case )
        # Compute metric and head importance again
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =compute_heads_importance(
            __snake_case , __snake_case , __snake_case , compute_entropy=__snake_case , head_mask=__snake_case )
        lowerCamelCase_ =1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __snake_case , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info('''Final head mask''' )
    print_ad_tensor(__snake_case )
    np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
    return head_mask


def a_ ( __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : str ) -> Optional[Any]:
    """Actually prune the heads selected by the mask and compare.

    Measures parameter count and timing before/after pruning, verifies
    the pruned-head count matches the mask, and saves the pruned model.
    """
    lowerCamelCase_ =datetime.now()
    lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =compute_heads_importance(
        __snake_case , __snake_case , __snake_case , compute_entropy=__snake_case , compute_importance=__snake_case , head_mask=__snake_case )
    lowerCamelCase_ =1 / loss
    lowerCamelCase_ =datetime.now() - before_time
    lowerCamelCase_ =sum(p.numel() for p in model.parameters() )
    # Map each layer to the list of head indices whose mask entry is 0.
    lowerCamelCase_ ={
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__snake_case ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(__snake_case , __snake_case ):
            # A single pruned head comes back as a scalar; wrap it in a list.
            lowerCamelCase_ =[
                v,
            ]
    assert sum(len(__snake_case ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(__snake_case )
    lowerCamelCase_ =sum(p.numel() for p in model.parameters() )
    lowerCamelCase_ =datetime.now()
    lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =compute_heads_importance(
        __snake_case , __snake_case , __snake_case , compute_entropy=__snake_case , compute_importance=__snake_case , head_mask=__snake_case , actually_pruned=__snake_case , )
    lowerCamelCase_ =1 / loss
    lowerCamelCase_ =datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __snake_case , __snake_case , pruned_num_params / original_num_params * 100 , )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __snake_case , __snake_case )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
    save_model(__snake_case , args.output_dir )


def a_ ( ) -> Optional[int]:
    """Entry point: parse CLI args, set up devices/logging, load the
    model and dataset, then run importance computation and (optionally)
    head masking followed by actual pruning."""
    lowerCamelCase_ =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=__snake_case , type=__snake_case , required=__snake_case , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=__snake_case , type=__snake_case , required=__snake_case , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--output_dir''' , default=__snake_case , type=__snake_case , required=__snake_case , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    # Other parameters
    parser.add_argument(
        '''--config_name''' , default='''''' , type=__snake_case , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default='''''' , type=__snake_case , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--cache_dir''' , default=__snake_case , type=__snake_case , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
    parser.add_argument(
        '''--data_subset''' , type=__snake_case , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=__snake_case , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=__snake_case , help='''Amount to heads to masking at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=__snake_case , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' , default=128 , type=__snake_case , help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) , )
    parser.add_argument('''--batch_size''' , default=1 , type=__snake_case , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=__snake_case , default=42 )
    parser.add_argument('''--local_rank''' , type=__snake_case , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=__snake_case , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=__snake_case , default='''''' , help='''Can be used for distant debugging.''' )
    lowerCamelCase_ =parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__snake_case )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        lowerCamelCase_ =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        lowerCamelCase_ =0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        lowerCamelCase_ =torch.device('''cuda''' , args.local_rank )
        lowerCamelCase_ =1
        torch.distributed.init_process_group(backend='''nccl''' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    lowerCamelCase_ =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        lowerCamelCase_ =nn.parallel.DistributedDataParallel(
            __snake_case , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__snake_case )
    elif args.n_gpu > 1:
        lowerCamelCase_ =nn.DataParallel(__snake_case )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=__snake_case )
    torch.save(__snake_case , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , __snake_case )
    # Prepare dataset
    lowerCamelCase_ =np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    lowerCamelCase_ =(torch.from_numpy(__snake_case ),)
    lowerCamelCase_ =TensorDataset(*__snake_case )
    lowerCamelCase_ =RandomSampler(__snake_case )
    lowerCamelCase_ =DataLoader(__snake_case , sampler=__snake_case , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(__snake_case , __snake_case , __snake_case )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        lowerCamelCase_ =mask_heads(__snake_case , __snake_case , __snake_case )
        prune_heads(__snake_case , __snake_case , __snake_case , __snake_case )


if __name__ == "__main__":
    main()
75
'''Tests for the TensorFlow XGLM model (config/model unit tests plus
slow integration tests for greedy/sampled/batched generation).

NOTE(review): identifier names appear machine-obfuscated (all three
classes are named ``__UpperCamelCase``, methods ``lowercase__``, and the
mixin bases are the undefined name ``lowerCamelCase__``) -- confirm
against the upstream ``test_modeling_tf_xglm.py`` before running.
'''
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class __UpperCamelCase :
    # Helper that builds small configs/inputs for the common model tests.
    lowercase : Union[str, Any] =XGLMConfig
    lowercase : Optional[Any] ={}
    lowercase : Optional[int] ='gelu'

    def __init__( self, lowerCAmelCase, lowerCAmelCase=14, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=0.0_2, ):
        """Store the (tiny) model hyperparameters used by the tests."""
        lowerCamelCase_ =parent
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =seq_length
        lowerCamelCase_ =is_training
        lowerCamelCase_ =use_input_mask
        lowerCamelCase_ =use_labels
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =d_model
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =ffn_dim
        lowerCamelCase_ =activation_function
        lowerCamelCase_ =activation_dropout
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =None
        lowerCamelCase_ =0
        lowerCamelCase_ =2
        lowerCamelCase_ =1

    def lowercase__ ( self ):
        """Return a large, pretrained config (used by config sanity checks)."""
        return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )

    def lowercase__ ( self ):
        """Build random input ids / attention mask / head mask plus a config."""
        lowerCamelCase_ =tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 )
        lowerCamelCase_ =None
        if self.use_input_mask:
            lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ =self.get_config()
        lowerCamelCase_ =floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def lowercase__ ( self ):
        """Return a tiny XGLMConfig built from the tester's hyperparameters."""
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=lowerCAmelCase, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=lowerCAmelCase, )

    def lowercase__ ( self ):
        """Return ``(config, inputs_dict)`` for the common test mixins."""
        lowerCamelCase_ =self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ),
        ) =config_and_inputs
        lowerCamelCase_ ={
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict


@require_tf
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    # Common model/pipeline tests via the shared TF test mixins.
    lowercase : int =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    lowercase : Optional[Any] =(TFXGLMForCausalLM,) if is_tf_available() else ()
    lowercase : Tuple =(
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    lowercase : Optional[Any] =False
    lowercase : Optional[Any] =False
    lowercase : Optional[int] =False

    def lowercase__ ( self ):
        """Set up the model tester and config tester."""
        lowerCamelCase_ =TFXGLMModelTester(self )
        lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, n_embd=37 )

    def lowercase__ ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @slow
    def lowercase__ ( self ):
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =TFXGLMModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )

    @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
    def lowercase__ ( self ):
        """Temporarily skipped resize-embeddings test (see skip reason)."""
        super().test_resize_token_embeddings()


@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    # Slow integration tests against the real facebook/xglm-564M checkpoint.
    @slow
    def lowercase__ ( self, lowerCAmelCase=True ):
        """Greedy generation must reproduce a fixed reference token sequence."""
        lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ =tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.intaa )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        lowerCamelCase_ =[2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Seeded sampling on CPU must reproduce a fixed reference string."""
        lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tf.random.set_seed(0 )
        lowerCamelCase_ =tokenizer('''Today is a nice day and''', return_tensors='''tf''' )
        lowerCamelCase_ =tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(''':/CPU:0''' ):
            lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, seed=[7, 0] )
        lowerCamelCase_ =tokenizer.decode(output_ids[0], skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =(
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(lowerCAmelCase, lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Left-padded batched generation must match unbatched generation."""
        lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ ='''left'''
        # use different length sentences to test batching
        lowerCamelCase_ =[
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When''',
            '''Hello, my dog is a little''',
        ]
        lowerCamelCase_ =tokenizer(lowerCAmelCase, return_tensors='''tf''', padding=lowerCAmelCase )
        lowerCamelCase_ =inputs['''input_ids''']
        lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, attention_mask=inputs['''attention_mask'''], max_new_tokens=12 )
        lowerCamelCase_ =tokenizer(sentences[0], return_tensors='''tf''' ).input_ids
        lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 )
        lowerCamelCase_ =tokenizer(sentences[1], return_tensors='''tf''' ).input_ids
        lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 )
        lowerCamelCase_ =tokenizer.batch_decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =tokenizer.decode(output_non_padded[0], skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =tokenizer.decode(output_padded[0], skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =[
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
            '''a single''',
            '''Hello, my dog is a little bit of a shy one, but he is very friendly''',
        ]
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase, [non_padded_sentence, padded_sentence] )
75
1
'''Convert a TensorFlow GPTSAN checkpoint to a PyTorch state dict.

Walks every variable in the TF checkpoint and maps its name to the
corresponding PyTorch parameter name, transposing kernels where
Mesh-TensorFlow stores them transposed, then ``torch.save``s the result.

NOTE(review): the converter function is obfuscated to ``a_`` and its
locals to ``lowerCamelCase_`` while the body reads names (``params``,
``output_state_dict``, ``vnp``, ``state``...) never visibly bound --
confirm against the upstream converter script before running.
'''
import argparse
import json
import os
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def a_ ( __snake_case : Optional[Any] ) -> Union[str, Any]:
    """Convert the TF checkpoint in ``args.tf_model_dir`` to ``args.output``.

    Reads ``parameters.json`` for sanity, then iterates checkpoint
    variables, rewriting each key into HF GPTSAN naming (MoE experts,
    MLP/attention/layer-norm weights, embeddings, final projection).
    Raises ``ValueError`` when ``parameters.json`` is empty.
    """
    lowerCamelCase_ =os.path.join(args.tf_model_dir , '''parameters.json''' )
    lowerCamelCase_ =json.loads(open(__snake_case ).read() )
    if not params:
        raise ValueError(
            F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
    if not args.output.endswith('''.pt''' ):
        lowerCamelCase_ =args.output + '''.pt'''
    lowerCamelCase_ =OrderedDict()
    with tf.device('''/CPU:0''' ):
        lowerCamelCase_ =tf.train.load_checkpoint(args.tf_model_dir )
        lowerCamelCase_ =reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            lowerCamelCase_ =reader.get_tensor(__snake_case ).astype(np.floataa )
            # Optimizer slots are not model weights; skip them.
            if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
                continue
            if key_name.startswith('''pasts/''' ):
                if key_name.startswith('''pasts/mlp''' ):
                    lowerCamelCase_ =int(key_name[9] )
                elif key_name.startswith('''pasts/out''' ):
                    lowerCamelCase_ =8
                lowerCamelCase_ ='''model.sqout.%d.weight''' % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                lowerCamelCase_ =vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                lowerCamelCase_ =torch.tensor(__snake_case )
            elif key_name.startswith('''model/moe''' ):
                lowerCamelCase_ =int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/switch_gating/kernel''' ):
                    lowerCamelCase_ ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
                    lowerCamelCase_ =vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase_ =torch.tensor(__snake_case )
                elif key_name.endswith('''/softmlp/kernel''' ):
                    lowerCamelCase_ ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
                    lowerCamelCase_ =vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase_ =torch.tensor(__snake_case )
                elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
                    lowerCamelCase_ =key_name[-9:-7]
                    for i in range(16 ):
                        lowerCamelCase_ ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
                        lowerCamelCase_ =(
                            vnp[i].transpose([1, 0] ).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        lowerCamelCase_ =torch.tensor(__snake_case )
            elif key_name.startswith('''model/mlp''' ):
                lowerCamelCase_ =int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/p1/kernel''' ):
                    lowerCamelCase_ ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
                    lowerCamelCase_ =vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase_ =torch.tensor(__snake_case )
                elif key_name.endswith('''/p1/bias''' ):
                    lowerCamelCase_ ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
                    lowerCamelCase_ =vnp.copy()  # same because it is one dimensional
                    lowerCamelCase_ =torch.tensor(__snake_case )
                elif key_name.endswith('''/p2/kernel''' ):
                    lowerCamelCase_ ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
                    lowerCamelCase_ =vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase_ =torch.tensor(__snake_case )
                elif key_name.endswith('''/p2/bias''' ):
                    lowerCamelCase_ ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
                    lowerCamelCase_ =vnp.copy()  # same because it is one dimensional
                    lowerCamelCase_ =torch.tensor(__snake_case )
            elif key_name.startswith('''model/ln''' ):
                lowerCamelCase_ =int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    lowerCamelCase_ ='''model.blocks.%d.feed_forward.norm.bias''' % player
                    lowerCamelCase_ =vnp.copy()  # same because it is one dimensional
                    lowerCamelCase_ =torch.tensor(__snake_case )
                elif key_name.endswith('''/g''' ):
                    lowerCamelCase_ ='''model.blocks.%d.feed_forward.norm.weight''' % player
                    lowerCamelCase_ =vnp.copy()  # same because it is one dimensional
                    lowerCamelCase_ =torch.tensor(__snake_case )
            elif key_name.startswith('''model/att''' ):
                lowerCamelCase_ =int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/qkv/kernel''' ):
                    lowerCamelCase_ =vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    # Fused QKV kernel: split axis 1 into the three projections.
                    lowerCamelCase_ =state[:, 0, :, :]
                    lowerCamelCase_ =state[:, 1, :, :]
                    lowerCamelCase_ =state[:, 2, :, :]
                    lowerCamelCase_ =(
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase_ =(
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase_ =(
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase_ ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
                    lowerCamelCase_ =torch.tensor(__snake_case )
                    lowerCamelCase_ ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
                    lowerCamelCase_ =torch.tensor(__snake_case )
                    lowerCamelCase_ ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
                    lowerCamelCase_ =torch.tensor(__snake_case )
                elif key_name.endswith('''/o/kernel''' ):
                    lowerCamelCase_ ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
                    lowerCamelCase_ =(
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    lowerCamelCase_ =torch.tensor(__snake_case )
            elif key_name.startswith('''model/an''' ):
                lowerCamelCase_ =int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    lowerCamelCase_ ='''model.blocks.%d.self_attn.norm.bias''' % player
                    lowerCamelCase_ =vnp.copy()  # same because it is one dimensional
                    lowerCamelCase_ =torch.tensor(__snake_case )
                elif key_name.endswith('''/g''' ):
                    lowerCamelCase_ ='''model.blocks.%d.self_attn.norm.weight''' % player
                    lowerCamelCase_ =vnp.copy()  # same because it is one dimensional
                    lowerCamelCase_ =torch.tensor(__snake_case )
            elif (
                key_name.startswith('''model/wte''' )
                or key_name.startswith('''model/wpe''' )
                or key_name.startswith('''model/ete''' )
            ):
                # Token / position / extra-position embedding tables.
                lowerCamelCase_ ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
                    key_name[-3:]
                ]
                lowerCamelCase_ ='''model.%s.weight''' % nlayer
                lowerCamelCase_ =vnp.copy()  # same in embedded
                lowerCamelCase_ =torch.tensor(__snake_case )
                if key_name.startswith('''model/wte''' ):
                    # Tied LM head shares the token embedding weights.
                    lowerCamelCase_ ='''lm_head.weight'''
                    lowerCamelCase_ =vnp.copy()  # same in embedded
                    lowerCamelCase_ =torch.tensor(__snake_case )
            elif key_name.startswith('''model/wob''' ):
                lowerCamelCase_ ='''final_logits_bias'''
                lowerCamelCase_ =vnp.copy()  # same in embedded
                lowerCamelCase_ =state.reshape((1, -1) )
                lowerCamelCase_ =torch.tensor(__snake_case )
            elif key_name == "model/dense/kernel":
                lowerCamelCase_ ='''model.last_project.weight'''
                lowerCamelCase_ =vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                lowerCamelCase_ =torch.tensor(__snake_case )
            elif key_name == "model/dense_1/bias":
                lowerCamelCase_ ='''model.last_project.bias'''
                lowerCamelCase_ =vnp.copy()  # same because it is one dimensional
                lowerCamelCase_ =torch.tensor(__snake_case )
    torch.save(__snake_case , args.output )


if __name__ == "__main__":
    # CLI entry point (annotations corrected to the actual argparse types).
    a_ : argparse.ArgumentParser = argparse.ArgumentParser(
        description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter )
    parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
    parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
    a_ : argparse.Namespace = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
75
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __UpperCamelCase : @staticmethod def lowercase__ ( *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class __UpperCamelCase ( unittest.TestCase ): lowercase : int =MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =pipeline( '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCamelCase_ =[ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] return object_detector, examples def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =object_detector(examples[0], threshold=0.0 ) lowerCamelCase_ =len(lowerCAmelCase ) self.assertGreater(lowerCAmelCase, 0 ) self.assertEqual( lowerCAmelCase, [ { '''score''': ANY(lowerCAmelCase ), '''label''': ANY(lowerCAmelCase ), '''box''': {'''xmin''': ANY(lowerCAmelCase ), '''ymin''': ANY(lowerCAmelCase ), '''xmax''': ANY(lowerCAmelCase ), '''ymax''': ANY(lowerCAmelCase )}, } for i in range(lowerCAmelCase ) ], ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def lowercase__ ( self ): """simple docstring""" pass @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline( '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCamelCase_ =object_detector( 
'''./tests/fixtures/tests_samples/COCO/000000039769.png''', candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=0.6_4, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ], ) lowerCamelCase_ =object_detector( [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ], threshold=0.6_4, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': 
{'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ] ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], ) lowerCamelCase_ =object_detector( [ { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', 
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, ], ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], ], ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def lowercase__ ( self ): """simple docstring""" pass @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =0.2 lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', 
candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=lowerCAmelCase, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =2 lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], top_k=lowerCAmelCase, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, ], )
75
1
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel a_ : Dict = HfApi() a_ : Tuple = {} # fmt: off a_ : Union[str, Any] = torch.tensor([ -0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67, 1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89, -1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39, 0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57 ]) a_ : List[str] = torch.tensor([ -2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36, 1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08, -2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48, 2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65 ]) a_ : Tuple = torch.tensor([ -0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69, -0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04, -0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25, 0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43 ]) a_ : Union[str, Any] = torch.tensor([ 0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72, -0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09, 0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05, -0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05 ]) a_ : Union[str, Any] = torch.tensor([ 0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33, -0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95, 0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59, -0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86 ]) a_ : Union[str, Any] = torch.tensor([ 0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78, -0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30, 0.13_91, 0.17_09, -0.05_62, 
-0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83, -0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31 ]) a_ : Optional[int] = torch.tensor([ 0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42, -0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98, 0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74, -0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90 ]) a_ : Tuple = torch.tensor([ 0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42, -0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90, 0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46, -0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73 ]) a_ : List[Any] = torch.tensor([ -1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30, 1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43, -2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10, 1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51]) a_ : Tuple = torch.tensor([ -1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24, 0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81, -2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59, 1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66 ]) a_ : List[str] = torch.tensor([ -1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12, 0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27, -2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31, 1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55 ]) a_ : int = torch.tensor([ -2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59, 1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51, -3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41, 3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 
3.20_66 ]) a_ : Union[str, Any] = torch.tensor([ -2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40, 1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98, -2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95, 2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43 ]) a_ : Dict = torch.tensor([ -2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36, 1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08, -3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60, 3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43 ]) a_ : Optional[int] = torch.tensor([ -1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44, 1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91, -2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39, 1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19 ]) # fmt: on a_ : Union[str, Any] = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": a_ : Optional[Any] = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1] print(F"""Started running {mod.modelId}!!!""") if mod.modelId.startswith("""CompVis"""): a_ : str = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""") else: a_ : int = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) a_ : Optional[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) a_ : str = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): a_ : List[Any] = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1e-3 ) print(F"""{mod.modelId} has passed successfully!!!""")
75
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ : Optional[int] = logging.get_logger(__name__) a_ : Optional[int] = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } a_ : List[Any] = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } a_ : Optional[int] = {"""facebook/blenderbot_small-90M""": 5_12} def a_ ( __snake_case : List[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ =set() lowerCamelCase_ =word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase_ =char lowerCamelCase_ =set(__snake_case ) return pairs class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : Tuple =PRETRAINED_VOCAB_FILES_MAP lowercase : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict =['input_ids', 'attention_mask'] def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase="__start__", lowerCAmelCase="__end__", lowerCAmelCase="__unk__", lowerCAmelCase="__null__", **lowerCAmelCase, ): """simple docstring""" super().__init__(unk_token=lowerCAmelCase, bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, pad_token=lowerCAmelCase, **lowerCAmelCase ) with open(lowerCAmelCase, encoding='''utf-8''' ) as vocab_handle: lowerCamelCase_ =json.load(lowerCAmelCase ) lowerCamelCase_ ={v: k for k, v in self.encoder.items()} with open(lowerCAmelCase, encoding='''utf-8''' ) as 
merges_handle: lowerCamelCase_ =merges_handle.read().split('''\n''' )[1:-1] lowerCamelCase_ =[tuple(merge.split() ) for merge in merges] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ ={} @property def lowercase__ ( self ): """simple docstring""" return len(self.encoder ) def lowercase__ ( self ): """simple docstring""" return dict(self.encoder, **self.added_tokens_encoder ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" if token in self.cache: return self.cache[token] lowerCamelCase_ =re.sub('''([.,!?()])''', R''' \1''', lowerCAmelCase ) lowerCamelCase_ =re.sub('''(\')''', R''' \1 ''', lowerCAmelCase ) lowerCamelCase_ =re.sub(R'''\s{2,}''', ''' ''', lowerCAmelCase ) if "\n" in token: lowerCamelCase_ =token.replace('''\n''', ''' __newln__''' ) lowerCamelCase_ =token.split(''' ''' ) lowerCamelCase_ =[] for token in tokens: if not len(lowerCAmelCase ): continue lowerCamelCase_ =token.lower() lowerCamelCase_ =tuple(lowerCAmelCase ) lowerCamelCase_ =tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowerCamelCase_ =get_pairs(lowerCAmelCase ) if not pairs: words.append(lowerCAmelCase ) continue while True: lowerCamelCase_ =min(lowerCAmelCase, key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase, float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase_, lowerCamelCase_ =bigram lowerCamelCase_ =[] lowerCamelCase_ =0 while i < len(lowerCAmelCase ): try: lowerCamelCase_ =word.index(lowerCAmelCase, lowerCAmelCase ) new_word.extend(word[i:j] ) lowerCamelCase_ =j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase_ =tuple(lowerCAmelCase ) lowerCamelCase_ =new_word if len(lowerCAmelCase ) == 1: break else: lowerCamelCase_ =get_pairs(lowerCAmelCase ) lowerCamelCase_ ='''@@ '''.join(lowerCAmelCase ) lowerCamelCase_ 
=word[:-4] lowerCamelCase_ =word words.append(lowerCAmelCase ) return " ".join(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =re.findall(R'''\S+\n?''', lowerCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase ).split(''' ''' ) ) ) return split_tokens def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =token.lower() return self.encoder.get(lowerCAmelCase, self.encoder.get(self.unk_token ) ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.decoder.get(lowerCAmelCase, self.unk_token ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =''' '''.join(lowerCAmelCase ).replace('''@@ ''', '''''' ).strip() return out_string def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase_ =os.path.join( lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase_ =os.path.join( lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCAmelCase, ensure_ascii=lowerCAmelCase ) + '''\n''' ) lowerCamelCase_ =0 with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) lowerCamelCase_ =token_index writer.write(''' '''.join(lowerCAmelCase ) + '''\n''' ) index += 1 
return vocab_file, merge_file
75
1
'''simple docstring''' from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": a_ : List[str] = input("""Enter image url: """).strip() print(F"""Downloading image from {url} ...""") a_ : Tuple = BeautifulSoup(requests.get(url).content, """html.parser""") # The image URL is in the content field of the first meta tag with property og:image a_ : Optional[Any] = soup.find("""meta""", {"""property""": """og:image"""})["""content"""] a_ : Dict = requests.get(image_url).content a_ : int = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, """wb""") as fp: fp.write(image_data) print(F"""Done. Image saved to disk as {file_name}.""")
75
'''simple docstring''' from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Dict = logging.get_logger(__name__) a_ : Any = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] ='efficientformer' def __init__( self, lowerCAmelCase = [3, 2, 6, 4], lowerCAmelCase = [48, 96, 224, 448], lowerCAmelCase = [True, True, True, True], lowerCAmelCase = 448, lowerCAmelCase = 32, lowerCAmelCase = 4, lowerCAmelCase = 7, lowerCAmelCase = 5, lowerCAmelCase = 8, lowerCAmelCase = 4, lowerCAmelCase = 0.0, lowerCAmelCase = 16, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 2, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = 1, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = 1e-5, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_2, lowerCAmelCase = 1e-12, lowerCAmelCase = 224, lowerCAmelCase = 1e-05, **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =hidden_sizes lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =initializer_range lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =patch_size lowerCamelCase_ =num_channels lowerCamelCase_ =depths lowerCamelCase_ =mlp_expansion_ratio lowerCamelCase_ =downsamples lowerCamelCase_ =dim lowerCamelCase_ =key_dim lowerCamelCase_ =attention_ratio lowerCamelCase_ =resolution lowerCamelCase_ =pool_size lowerCamelCase_ =downsample_patch_size lowerCamelCase_ =downsample_stride lowerCamelCase_ =downsample_pad lowerCamelCase_ =drop_path_rate lowerCamelCase_ =num_metaad_blocks lowerCamelCase_ =distillation lowerCamelCase_ =use_layer_scale lowerCamelCase_ =layer_scale_init_value lowerCamelCase_ =image_size lowerCamelCase_ =batch_norm_eps
75
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : List[Any] =StableDiffusionInstructPixaPixPipeline lowercase : List[Any] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} lowercase : Optional[Any] =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase : Union[str, Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS lowercase : List[Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ =UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, ) lowerCamelCase_ =PNDMScheduler(skip_prk_steps=lowerCAmelCase ) torch.manual_seed(0 ) lowerCamelCase_ =AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], 
latent_channels=4, ) torch.manual_seed(0 ) lowerCamelCase_ =CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, ) lowerCamelCase_ =CLIPTextModel(lowerCAmelCase ) lowerCamelCase_ =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCamelCase_ ={ '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ): """simple docstring""" lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 )[0] lowerCamelCase_ =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ) if str(lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) else: lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowerCamelCase_ ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''image_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ =np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 
0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ ='''french fries''' lowerCamelCase_ =sd_pipe(**lowerCAmelCase, negative_prompt=lowerCAmelCase ) lowerCamelCase_ =output.images lowerCamelCase_ =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ =np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ =[inputs['''prompt''']] * 2 lowerCamelCase_ =np.array(inputs['''image'''] ).astype(np.floataa ) / 2_5_5.0 lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ).unsqueeze(0 ).to(lowerCAmelCase ) lowerCamelCase_ =image / 2 + 0.5 lowerCamelCase_ =image.permute(0, 3, 1, 2 ) lowerCamelCase_ =image.repeat(2, 1, 1, 1 ) lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) lowerCamelCase_ =np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] ) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='''scaled_linear''' ) lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1] lowerCamelCase_ =[round(lowerCAmelCase, 4 ) for x in image_slice.flatten().tolist()] print(''','''.join([str(lowerCAmelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) lowerCamelCase_ =np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =VaeImageProcessor(do_resize=lowerCAmelCase, do_normalize=lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' ) )[0] lowerCamelCase_ =components['''vae'''] lowerCamelCase_ =self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): lowerCamelCase_ =vae.encode(inputs[image_param] ).latent_dist.mode() lowerCamelCase_ =pipe(**lowerCAmelCase )[0] lowerCamelCase_ 
=np.abs(out - out_latents_inputs ).max() self.assertLess(lowerCAmelCase, 1e-4, '''passing latents as image input generate different result from passing image''' ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self, lowerCAmelCase=0 ): """simple docstring""" lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' ) lowerCamelCase_ ={ '''prompt''': '''turn him into a cyborg''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''image_guidance_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase ) lowerCamelCase_ =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, 
-1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase ) lowerCamelCase_ =DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =0 def callback_fn(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) -> None: lowerCamelCase_ =True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowerCamelCase_ =latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowerCamelCase_ =latents[0, -3:, -3:, -1] lowerCamelCase_ =np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: lowerCamelCase_ =latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowerCamelCase_ =latents[0, -3:, -3:, -1] lowerCamelCase_ =np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 lowerCamelCase_ =False lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', 
safety_checker=lowerCAmelCase, torch_dtype=torch.floataa ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() pipe(**lowerCAmelCase, callback=lowerCAmelCase, callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def lowercase__ ( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ) lowerCamelCase_ =torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 lowerCamelCase_ =inputs['''image'''].resize((504, 504) ) lowerCamelCase_ ='''timbrooks/instruct-pix2pix''' lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( lowerCAmelCase, safety_checker=lowerCAmelCase, ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =pipe(**lowerCAmelCase ) lowerCamelCase_ =output.images[0] lowerCamelCase_ =image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) lowerCamelCase_ =np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
75
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor a_ : Union[str, Any] = random.Random() def a_ ( __snake_case : int , __snake_case : int=1.0 , __snake_case : Tuple=None , __snake_case : Union[str, Any]=None ) -> str: """simple docstring""" if rng is None: lowerCamelCase_ =global_rng lowerCamelCase_ =[] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __UpperCamelCase ( unittest.TestCase ): def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=400, lowerCAmelCase=2_000, lowerCAmelCase=24, lowerCAmelCase=24, lowerCAmelCase=0.0, lowerCAmelCase=16_000, lowerCAmelCase=True, lowerCAmelCase=True, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =min_seq_length lowerCamelCase_ =max_seq_length lowerCamelCase_ =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCamelCase_ =feature_size lowerCamelCase_ =num_mel_bins lowerCamelCase_ =padding_value lowerCamelCase_ =sampling_rate lowerCamelCase_ =return_attention_mask lowerCamelCase_ =do_normalize def lowercase__ ( self ): """simple docstring""" return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase__ ( self, lowerCAmelCase=False, lowerCAmelCase=False ): """simple docstring""" def _flatten(lowerCAmelCase ): return list(itertools.chain(*lowerCAmelCase ) ) if equal_length: lowerCamelCase_ 
=[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowerCamelCase_ =[ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: lowerCamelCase_ =[np.asarray(lowerCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Any =SpeechaTextFeatureExtractor if is_speech_available() else None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextFeatureExtractionTester(self ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" self.assertTrue(np.all(np.mean(lowerCAmelCase, axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase, axis=0 ) - 1 ) < 1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =[np.asarray(lowerCAmelCase ) for speech_input in speech_inputs] # Test feature size lowerCamelCase_ =feature_extractor(lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input lowerCamelCase_ =feature_extractor(speech_inputs[0], return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(np_speech_inputs[0], return_tensors='''np''' ).input_features self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) # Test batched lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features for enc_seq_a, 
enc_seq_a in zip(lowerCAmelCase, lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. lowerCamelCase_ =[floats_list((1, x) )[0] for x in (800, 800, 800)] lowerCamelCase_ =np.asarray(lowerCAmelCase ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase, lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =['''longest''', '''max_length''', '''do_not_pad'''] lowerCamelCase_ =[None, 16, None] for max_length, padding in zip(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_attention_mask=lowerCAmelCase ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =[np.sum(lowerCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =['''longest''', '''max_length''', '''do_not_pad'''] lowerCamelCase_ =[None, 16, None] for max_length, padding in zip(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =feature_extractor( lowerCAmelCase, 
max_length=lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =[np.sum(lowerCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''max_length''', max_length=4, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''longest''', max_length=4, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : 
fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 4, 24) ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''longest''', max_length=16, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 6, 24) ) def lowercase__ ( self ): """simple docstring""" import torch lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =np.random.rand(100, 32 ).astype(np.floataa ) lowerCamelCase_ =np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowerCamelCase_ =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) lowerCamelCase_ =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" from datasets import load_dataset lowerCamelCase_ =load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech lowerCamelCase_ =ds.sort('''id''' ).select(range(lowerCAmelCase ) 
)[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =np.array([ -1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1, -1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8, -1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5, ] ) # fmt: on lowerCamelCase_ =self._load_datasamples(1 ) lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''pt''' ).input_features self.assertEquals(input_features.shape, (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30], lowerCAmelCase, atol=1e-4 ) )
75
1
"""Project Euler problem 6: sum-square difference.

Find the difference between the square of the sum and the sum of the
squares of the first *n* natural numbers.
"""


def solution(n: int = 100) -> int:
    """Return (1 + 2 + ... + n)**2 - (1**2 + 2**2 + ... + n**2).

    Uses the closed-form identities instead of a loop:
      sum of squares = n(n+1)(2n+1)/6
      square of sum  = (n(n+1)/2)**2

    >>> solution(10)
    2640

    :param n: upper bound of the range 1..n (defaults to 100, as in the
        original Project Euler statement).
    :return: the non-negative difference as an exact ``int``.
    """
    # Floor division is exact here (both numerators are always divisible),
    # and keeps everything in integer arithmetic — no float rounding for
    # large ``n`` as the original ``/`` + ``int(...)`` version risked.
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


# Backward-compatible alias: the original (garbled) file bound this
# function to the name ``a_``.
a_ = solution


if __name__ == "__main__":
    print(f"""{solution() = }""")
75
"""Simplified DES (S-DES): a teaching cipher with a 10-bit key and 8-bit blocks.

Bit strings are plain Python strings of ``"0"``/``"1"``.  Permutation tables
hold 1-based bit positions, following the convention of the DES literature.
"""

# --- tables (module-level constants so the helpers below are importable) ---
p8_table = [6, 3, 7, 4, 8, 5, 10, 9]  # 10-bit shifted key -> 8-bit round key
p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]  # initial 10-bit key permutation
p4_table = [2, 4, 3, 1]  # 4-bit permutation inside the round function F
IP = [2, 6, 3, 1, 4, 8, 5, 7]  # initial permutation of the message block
IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]  # inverse of IP
expansion = [4, 1, 2, 3, 2, 3, 4, 1]  # expands a 4-bit half to 8 bits
s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]


def apply_table(inp: str, table: list) -> str:
    """Return the bits of *inp* selected at the 1-based positions in *table*."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data: str) -> str:
    """Rotate the bit string one position to the left."""
    return data[1:] + data[0]


def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s: list, data: str) -> str:
    """Look up a 4-bit input in S-box *s*.

    The outer bits (first and last) select the row, the inner two bits the
    column.  The result may be a single bit; callers zero-pad to width 2.
    """
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion: list, sa: list, sb: list, key: str, message: str) -> str:
    """One Feistel round: (left XOR F(right, key)) + right.

    The right half is expanded, mixed with the round *key*, pushed through
    both S-boxes and P4, then XOR-ed onto the left half; the right half
    passes through unchanged (which is what makes the round self-inverse).
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sb, temp[4:])
    # S-box outputs are 0..3 -> pad each to exactly two bits.
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


# Backward-compatible alias: the last ``a_`` binding in the original
# (garbled) file resolved to this round function.
a_ = function


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    # key generation: P10, split, rotate, P8 twice (three rotations total
    # before the second round key).
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    keya = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    keyb = apply_table(left + right, p8_table)

    # encryption: IP, round(k1), swap halves, round(k2), IP^-1
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, keya, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, keyb, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption: identical structure with the round keys reversed
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, keyb, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, keya, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
75
1
"""Project Euler problem 10: return the sum of all primes below two million."""
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Return ``True`` iff *number* is prime, by 6k +/- 1 trial division.

    Runs in O(sqrt(n)) divisions; handles negatives, 0 and 1 correctly.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order, starting from 2, forever."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below *n*.

    >>> solution(10)
    17
    """
    # takewhile stops the infinite generator at the first prime >= n.
    return sum(takewhile(lambda x: x < n, prime_generator()))


# Backward-compatible alias: the last ``a_`` binding in the original
# (garbled) file resolved to this function.
a_ = solution


if __name__ == "__main__":
    print(f"""{solution() = }""")
75
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) a_ : List[Any] = logging.get_logger(__name__) a_ : Tuple = OrderedDict( [ ("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""), ("""beit""", """BeitFeatureExtractor"""), ("""chinese_clip""", """ChineseCLIPFeatureExtractor"""), ("""clap""", """ClapFeatureExtractor"""), ("""clip""", """CLIPFeatureExtractor"""), ("""clipseg""", """ViTFeatureExtractor"""), ("""conditional_detr""", """ConditionalDetrFeatureExtractor"""), ("""convnext""", """ConvNextFeatureExtractor"""), ("""cvt""", """ConvNextFeatureExtractor"""), ("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""), ("""data2vec-vision""", """BeitFeatureExtractor"""), ("""deformable_detr""", """DeformableDetrFeatureExtractor"""), ("""deit""", """DeiTFeatureExtractor"""), ("""detr""", """DetrFeatureExtractor"""), ("""dinat""", """ViTFeatureExtractor"""), ("""donut-swin""", """DonutFeatureExtractor"""), ("""dpt""", """DPTFeatureExtractor"""), ("""encodec""", """EncodecFeatureExtractor"""), ("""flava""", """FlavaFeatureExtractor"""), ("""glpn""", """GLPNFeatureExtractor"""), ("""groupvit""", """CLIPFeatureExtractor"""), ("""hubert""", """Wav2Vec2FeatureExtractor"""), ("""imagegpt""", """ImageGPTFeatureExtractor"""), ("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""), ("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""), ("""levit""", """LevitFeatureExtractor"""), 
("""maskformer""", """MaskFormerFeatureExtractor"""), ("""mctct""", """MCTCTFeatureExtractor"""), ("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""), ("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""), ("""mobilevit""", """MobileViTFeatureExtractor"""), ("""nat""", """ViTFeatureExtractor"""), ("""owlvit""", """OwlViTFeatureExtractor"""), ("""perceiver""", """PerceiverFeatureExtractor"""), ("""poolformer""", """PoolFormerFeatureExtractor"""), ("""regnet""", """ConvNextFeatureExtractor"""), ("""resnet""", """ConvNextFeatureExtractor"""), ("""segformer""", """SegformerFeatureExtractor"""), ("""sew""", """Wav2Vec2FeatureExtractor"""), ("""sew-d""", """Wav2Vec2FeatureExtractor"""), ("""speech_to_text""", """Speech2TextFeatureExtractor"""), ("""speecht5""", """SpeechT5FeatureExtractor"""), ("""swiftformer""", """ViTFeatureExtractor"""), ("""swin""", """ViTFeatureExtractor"""), ("""swinv2""", """ViTFeatureExtractor"""), ("""table-transformer""", """DetrFeatureExtractor"""), ("""timesformer""", """VideoMAEFeatureExtractor"""), ("""tvlt""", """TvltFeatureExtractor"""), ("""unispeech""", """Wav2Vec2FeatureExtractor"""), ("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""), ("""van""", """ConvNextFeatureExtractor"""), ("""videomae""", """VideoMAEFeatureExtractor"""), ("""vilt""", """ViltFeatureExtractor"""), ("""vit""", """ViTFeatureExtractor"""), ("""vit_mae""", """ViTFeatureExtractor"""), ("""vit_msn""", """ViTFeatureExtractor"""), ("""wav2vec2""", """Wav2Vec2FeatureExtractor"""), ("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""), ("""wavlm""", """Wav2Vec2FeatureExtractor"""), ("""whisper""", """WhisperFeatureExtractor"""), ("""xclip""", """CLIPFeatureExtractor"""), ("""yolos""", """YolosFeatureExtractor"""), ] ) a_ : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def a_ ( __snake_case : str ) -> Any: """simple docstring""" for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in 
extractors: lowerCamelCase_ =model_type_to_module_name(__snake_case ) lowerCamelCase_ =importlib.import_module(F'''.{module_name}''' , '''transformers.models''' ) try: return getattr(__snake_case , __snake_case ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(__snake_case , '''__name__''' , __snake_case ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. lowerCamelCase_ =importlib.import_module('''transformers''' ) if hasattr(__snake_case , __snake_case ): return getattr(__snake_case , __snake_case ) return None def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : Union[str, Any] , ) -> List[str]: """simple docstring""" lowerCamelCase_ =get_file_from_repo( __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , ) if resolved_config_file is None: logger.info( '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' ) return {} with open(__snake_case , encoding='''utf-8''' ) as reader: return json.load(__snake_case ) class __UpperCamelCase : def __init__( self ): """simple docstring""" raise EnvironmentError( '''AutoFeatureExtractor is designed to be instantiated ''' '''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(lowerCAmelCase ) 
def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =kwargs.pop('''config''', lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''trust_remote_code''', lowerCAmelCase ) lowerCamelCase_ =True lowerCamelCase_, lowerCamelCase_ =FeatureExtractionMixin.get_feature_extractor_dict(lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =config_dict.get('''feature_extractor_type''', lowerCAmelCase ) lowerCamelCase_ =None if "AutoFeatureExtractor" in config_dict.get('''auto_map''', {} ): lowerCamelCase_ =config_dict['''auto_map''']['''AutoFeatureExtractor'''] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase, **lowerCAmelCase ) # It could be in `config.feature_extractor_type`` lowerCamelCase_ =getattr(lowerCAmelCase, '''feature_extractor_type''', lowerCAmelCase ) if hasattr(lowerCAmelCase, '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map: lowerCamelCase_ =config.auto_map['''AutoFeatureExtractor'''] if feature_extractor_class is not None: lowerCamelCase_ =feature_extractor_class_from_name(lowerCAmelCase ) lowerCamelCase_ =feature_extractor_auto_map is not None lowerCamelCase_ =feature_extractor_class is not None or type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING lowerCamelCase_ =resolve_trust_remote_code( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) if has_remote_code and trust_remote_code: lowerCamelCase_ =get_class_from_dynamic_module( lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''code_revision''', lowerCAmelCase ) if os.path.isdir(lowerCAmelCase ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase ) elif feature_extractor_class is not None: return 
feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING: lowerCamelCase_ =FEATURE_EXTRACTOR_MAPPING[type(lowerCAmelCase )] return feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase ) raise ValueError( f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def lowercase__ ( lowerCAmelCase, lowerCAmelCase ): """simple docstring""" FEATURE_EXTRACTOR_MAPPING.register(lowerCAmelCase, lowerCAmelCase )
75
1
'''simple docstring''' import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a_ : Dict = logging.get_logger(__name__) class __UpperCamelCase : def __init__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase=None, lowerCAmelCase=None ): """simple docstring""" if not conversation_id: lowerCamelCase_ =uuid.uuida() if past_user_inputs is None: lowerCamelCase_ =[] if generated_responses is None: lowerCamelCase_ =[] lowerCamelCase_ =conversation_id lowerCamelCase_ =past_user_inputs lowerCamelCase_ =generated_responses lowerCamelCase_ =text def __eq__( self, lowerCAmelCase ): """simple docstring""" if not isinstance(lowerCAmelCase, lowerCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = False ): """simple docstring""" if self.new_user_input: if overwrite: logger.warning( f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' f'''with: "{text}".''' ) lowerCamelCase_ =text else: logger.warning( f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' f'''ignored: "{text}". 
Set `overwrite` to True to overwrite unprocessed user input''' ) else: lowerCamelCase_ =text def lowercase__ ( self ): """simple docstring""" if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) lowerCamelCase_ =None def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" self.generated_responses.append(lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): """simple docstring""" lowerCamelCase_ =f'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): lowerCamelCase_ ='''user''' if is_user else '''bot''' output += f'''{name} >> {text} \n''' return output @add_end_docstrings( lowerCamelCase__ , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" super().__init__(*lowerCAmelCase, **lowerCAmelCase ) if self.tokenizer.pad_token_id is None: lowerCamelCase_ =self.tokenizer.eos_token def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ={} lowerCamelCase_ ={} lowerCamelCase_ ={} if min_length_for_response is not None: lowerCamelCase_ =min_length_for_response if minimum_tokens is not None: lowerCamelCase_ =minimum_tokens if "max_length" in generate_kwargs: lowerCamelCase_ =generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: lowerCamelCase_ =clean_up_tokenization_spaces if 
generate_kwargs: forward_params.update(lowerCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self, lowerCAmelCase, lowerCAmelCase=0, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =super().__call__(lowerCAmelCase, num_workers=lowerCAmelCase, **lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) == 1: return outputs[0] return outputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=32 ): """simple docstring""" if not isinstance(lowerCAmelCase, lowerCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer, '''_build_conversation_input_ids''' ): lowerCamelCase_ =self.tokenizer._build_conversation_input_ids(lowerCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version lowerCamelCase_ =self._legacy_parse_and_tokenize(lowerCAmelCase ) if self.framework == "pt": lowerCamelCase_ =torch.LongTensor([input_ids] ) elif self.framework == "tf": lowerCamelCase_ =tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=10, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =generate_kwargs.get('''max_length''', self.model.config.max_length ) lowerCamelCase_ =model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(f'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) lowerCamelCase_ =max_length - minimum_tokens lowerCamelCase_ =model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: lowerCamelCase_ =model_inputs['''attention_mask'''][:, -trim:] lowerCamelCase_ 
=model_inputs.pop('''conversation''' ) lowerCamelCase_ =max_length lowerCamelCase_ =self.model.generate(**lowerCAmelCase, **lowerCAmelCase ) if self.model.config.is_encoder_decoder: lowerCamelCase_ =1 else: lowerCamelCase_ =n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=True ): """simple docstring""" lowerCamelCase_ =model_outputs['''output_ids'''] lowerCamelCase_ =self.tokenizer.decode( output_ids[0], skip_special_tokens=lowerCAmelCase, clean_up_tokenization_spaces=lowerCAmelCase, ) lowerCamelCase_ =model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(lowerCAmelCase ) return conversation def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.tokenizer.eos_token_id lowerCamelCase_ =[] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase ) ) if len(lowerCAmelCase ) > self.tokenizer.model_max_length: lowerCamelCase_ =input_ids[-self.tokenizer.model_max_length :] return input_ids
75
"""Fine-tune OpenAI GPT (double-heads model) on the ROCStories cloze task.

Restored from an obfuscated dump in which every local assignment target had
been collapsed to a single placeholder name and `main()` was never defined.
The script trains `OpenAIGPTDoubleHeadsModel` on (story, cont1, cont2, label)
tuples read from the ROCStories CSV files and evaluates multiple-choice
accuracy on the dev set.
"""
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    """Return the number of rows of `out` whose argmax matches `labels`."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Read a ROCStories CSV and return a list of (story, cont1, cont2, label) tuples.

    `label` is shifted from 1-based (file) to 0-based (model).
    """
    with open(dataset_path, encoding="utf_8") as f:
        f_csv = csv.reader(f)
        output = []
        next(f_csv)  # skip the header line
        for line in tqdm(f_csv):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pack token-encoded datasets into fixed-size tensors.

    Each example becomes two candidate sequences
    ``[start] story [delim] cont_i [clf]`` (i = 1, 2). Returns one
    (input_ids, mc_token_ids, lm_labels, mc_labels) tensor tuple per dataset.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        # -100 is the ignore_index of the LM cross-entropy loss, so padding is masked out.
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # mc_token_ids marks the position of the [clf] token (last token of each candidate).
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        # Bug fixed: the dump built `torch.tensor(...)` from a constant instead of
        # the loop variable `t`, producing four identical tensors.
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model. The three special tokens get fresh embeddings
    # that are fine-tuned on the RocStories dataset.
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    def tokenize_and_encode(obj):
        """Recursively tokenize strings; pass ints through; map over containers."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer.
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # max size of input for the pre-trained model

    # Prepare input tensors and dataloaders.
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer.
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                # Combined loss: weighted LM loss plus multiple-choice loss.
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model.
    if args.do_train:
        # Save a trained model, configuration and tokenizer.
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`.
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned.
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
75
1
"""Small regression fixtures used by accelerate-style training tests.

Restored from an obfuscated dump where constructor parameters had been
renamed away from the names the bodies actually use (`a`, `b`, `length`, `i`),
leaving every method referencing undefined variables, and both model classes
shared the same placeholder name.
"""
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    """Synthetic 1-D linear-regression dataset: y = a * x + b + noise."""

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        # Bug fixed: the parameter had been renamed while the body still used `i`.
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    """Variant with fixed 2-element parameters; only index 0 is used in forward."""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            # Printed once so tests can assert the dtypes that reached the model.
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    """Scalar linear model y = a * x + b with learnable a, b."""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size=16):
    """Build tiny MRPC train/eval dataloaders for tests.

    NOTE(review): `batch_size` is accepted but the dataloaders use hard-coded
    batch sizes 2 and 1, matching the original fixture — confirm before relying
    on the parameter.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset.
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
75
'''simple docstring''' import copy import os import cva import numpy as np from matplotlib import pyplot as plt class __UpperCamelCase : def __init__( self ): """simple docstring""" lowerCamelCase_ ='''''' lowerCamelCase_ ='''''' lowerCamelCase_ =[] lowerCamelCase_ =0 lowerCamelCase_ =256 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =cva.imread(lowerCAmelCase, 0 ) lowerCamelCase_ =copy.deepcopy(self.img ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =plt.hist(self.img.ravel(), 256, [0, 256], label='''x''' ) lowerCamelCase_ =np.sum(lowerCAmelCase ) for i in range(len(lowerCAmelCase ) ): lowerCamelCase_ =x[i] / self.k self.sk += prk lowerCamelCase_ =(self.L - 1) * self.sk if self.rem != 0: lowerCamelCase_ =int(last % last ) lowerCamelCase_ =int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(lowerCAmelCase ) lowerCamelCase_ =int(np.ma.count(self.img ) / self.img[1].size ) lowerCamelCase_ =self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCamelCase_ =self.img[j][i] if num != self.last_list[num]: lowerCamelCase_ =self.last_list[num] cva.imwrite('''output_data/output.jpg''', self.img ) def lowercase__ ( self ): """simple docstring""" plt.hist(self.img.ravel(), 256, [0, 256] ) def lowercase__ ( self ): """simple docstring""" cva.imshow('''Output-Image''', self.img ) cva.imshow('''Input-Image''', self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": a_ : str = os.path.join(os.path.basename(__file__), """image_data/input.jpg""") a_ : Optional[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
75
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ : Optional[Any] = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : int = ["""YolosFeatureExtractor"""] a_ : Optional[int] = ["""YolosImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[Any] = [ """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""", """YolosForObjectDetection""", """YolosModel""", """YolosPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys a_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
75
"""Zero-shot audio classification pipeline (CLAP-style audio/text matching).

Restored from an obfuscated dump in which every hook method had been renamed
to the same placeholder, so the Pipeline base class's
`_sanitize_parameters` / `preprocess` / `_forward` / `postprocess` contract
was never satisfied, and several locals referenced undefined names.
"""
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        """Classify the audio against the given candidate labels."""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        # One hypothesis sentence per candidate label, e.g. "This is a sound of rain."
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        # Highest-probability label first.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
75
1
"""Object-detection pipeline (regular detectors and LayoutLM token-classifiers).

Restored from an obfuscated dump in which every hook method had been renamed
to the same placeholder (breaking the Pipeline `preprocess`/`_forward`/
`postprocess` contract), `id2label` had been mangled to `idalabel`, and all
locals were collapsed to a single name.
"""
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        """Detect objects in the given image(s); returns score/label/box dicts."""
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            # LayoutLM path: the image processor ran OCR; tokenize its words/boxes.
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                # LayoutLM boxes are normalized to a 1000x1000 grid.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_000),
                            (height * bbox[1] / 1_000),
                            (width * bbox[2] / 1_000),
                            (height * bbox[3] / 1_000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel.
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Convert a 4-element (xmin, ymin, xmax, ymax) tensor into a dict of ints."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : str = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp a_ : Optional[Any] = 5 a_ : str = 10 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : int =SpeechaTextTokenizer lowercase : int =False lowercase : List[str] =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =sp.SentencePieceProcessor() spm_model.Load(lowerCAmelCase ) lowerCamelCase_ =['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCAmelCase ) )] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ =Path(self.tmpdirname ) save_json(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''<pad>''' lowerCamelCase_ =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ), lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ), lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =list(self.get_tokenizer().get_vocab().keys() ) 
self.assertEqual(vocab_keys[0], '''<s>''' ) self.assertEqual(vocab_keys[1], '''<pad>''' ) self.assertEqual(vocab_keys[-1], '''j''' ) self.assertEqual(len(lowerCAmelCase ), 1_001 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size, 1_001 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCamelCase_ =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [289, 50, 14, 174, 386], ) lowerCamelCase_ =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], ) lowerCamelCase_ =tokenizer.convert_tokens_to_ids(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCamelCase_ =tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 
142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase, model_name='''facebook/s2t-small-mustc-en-de-st''', revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''', ) @require_sentencepiece class __UpperCamelCase ( 
unittest.TestCase ):
    # Integration tests for the multilingual Speech2Text tokenizer checkpoint.
    #
    # NOTE(review): this file was machine-obfuscated.  All method names were
    # rewritten to `lowercase__` (so later defs shadow earlier ones at class
    # creation) and the class attributes below lost their original names, even
    # though the code still reads `cls.checkpoint_name` / `self.french_text` /
    # `FR_CODE` / `ES_CODE` (defined earlier in the file).  Confirm against the
    # upstream transformers test before relying on any of it.
    lowercase : Tuple ='valhalla/s2t_mustc_multilinguial_medium'
    lowercase : Dict ='C\'est trop cool'
    lowercase : str ='Esto es genial'

    @classmethod
    def lowercase__ ( cls ):
        """Load the shared tokenizer once for the whole test class."""
        lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
        return cls

    def lowercase__ ( self ):
        """Spot-check a few expected language-code -> id mappings."""
        self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4 )
        self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6 )
        self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9 )
        self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 11 )

    def lowercase__ ( self ):
        """The checkpoint's vocabulary has exactly 10k entries."""
        self.assertEqual(self.tokenizer.vocab_size, 10_000 )

    def lowercase__ ( self ):
        """Decoding with skip_special_tokens drops the language code and EOS.

        Decoding the full sequence and the sequence without its leading
        language-code id must agree, and the EOS token must not appear.
        """
        self.assertIn(lowerCAmelCase, self.tokenizer.all_special_ids )
        lowerCamelCase_ =[ES_CODE, 4, 1_601, 47, 7_647, 2]
        lowerCamelCase_ =self.tokenizer.decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase )
        self.assertEqual(lowerCAmelCase, lowerCAmelCase )
        self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase )

    def lowercase__ ( self ):
        """Encoding prefixes the target-language id and appends EOS."""
        lowerCamelCase_ ='''fr'''
        lowerCamelCase_ =self.tokenizer(self.french_text ).input_ids
        self.assertEqual(encoded[0], lowerCAmelCase )
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id )

    def lowercase__ ( self ):
        """Switching the target language updates `prefix_tokens` accordingly."""
        lowerCamelCase_ ='''fr'''
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE] )
        lowerCamelCase_ ='''es'''
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE] )
75
1
'''simple docstring'''
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version

# NOTE(review): this module was machine-obfuscated — every assignment target was
# rewritten to `lowerCamelCase_` while later statements still read the original
# names (`is_compiled`, `options`, `forward`, `port`, ...).  The docstrings
# below describe the apparent intent; verify against upstream accelerate.

if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def a_ ( __snake_case : List[str] ) -> Optional[int]:
    """Return True if the argument is a torch.compile()-produced OptimizedModule.

    Always False on torch < 2.0.0 or when torch exposes no `_dynamo` attribute.
    """
    if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(__snake_case , '''_dynamo''' ):
        return False
    return isinstance(__snake_case , torch._dynamo.eval_frame.OptimizedModule )


def a_ ( __snake_case : Tuple , __snake_case : bool = True ) -> List[str]:
    """Unwrap a model from DDP/DataParallel (and DeepSpeed, if installed) wrappers.

    If the model was `torch.compile`d, the compiled wrapper is peeled off first
    and re-applied at the end.  The second flag (presumably `keep_fp32_wrapper`)
    additionally strips wrapped `forward` decorators when False.
    """
    lowerCamelCase_ =(torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    lowerCamelCase_ =is_compiled_module(__snake_case )
    if is_compiled:
        # remember the compiled wrapper and work on the underlying module
        lowerCamelCase_ =model
        lowerCamelCase_ =model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(__snake_case , __snake_case ):
        # repeatedly strip `.module` wrappers
        lowerCamelCase_ =model.module
    if not keep_fpaa_wrapper:
        # walk the __wrapped__ chain back to the pre-wrapping forward
        lowerCamelCase_ =getattr(__snake_case , '''forward''' )
        lowerCamelCase_ =model.__dict__.pop('''_original_forward''' , __snake_case )
        if original_forward is not None:
            while hasattr(__snake_case , '''__wrapped__''' ):
                lowerCamelCase_ =forward.__wrapped__
                if forward == original_forward:
                    break
            lowerCamelCase_ =forward
    if getattr(__snake_case , '''_converted_to_transformer_engine''' , __snake_case ):
        # undo the transformer-engine layer conversion
        convert_model(__snake_case , to_transformer_engine=__snake_case )
    if is_compiled:
        # re-attach the unwrapped module to the compiled wrapper
        lowerCamelCase_ =model
        lowerCamelCase_ =compiled_model
    return model


def a_ ( ) -> Optional[Any]:
    """Block until every process in the distributed group reaches this point."""
    PartialState().wait_for_everyone()


def a_ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ) -> List[str]:
    """Save an object: via `xm.save` on TPU, otherwise only on the main local process."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(__snake_case , __snake_case )
    elif PartialState().local_process_index == 0:
        torch.save(__snake_case , __snake_case )


@contextmanager
def a_ ( **__snake_case : List[str] ) -> Union[str, Any]:
    """Context manager that temporarily sets environment variables.

    Keys are upper-cased; on exit the variables are deleted again.
    NOTE(review): pre-existing values are NOT restored on exit, and the
    obfuscation dropped the `os.environ[...] = str(value)` target — confirm
    against upstream.
    """
    for key, value in kwargs.items():
        lowerCamelCase_ =str(__snake_case )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def a_ ( __snake_case : Tuple ) -> List[Any]:
    """Return a human-readable name for an object.

    Prefers `__qualname__`, then `__name__` (falling back to the object's
    class), else `str(obj)`.
    """
    if not hasattr(__snake_case , '''__qualname__''' ) and not hasattr(__snake_case , '''__name__''' ):
        lowerCamelCase_ =getattr(__snake_case , '''__class__''' , __snake_case )
    if hasattr(__snake_case , '''__qualname__''' ):
        return obj.__qualname__
    if hasattr(__snake_case , '''__name__''' ):
        return obj.__name__
    return str(__snake_case )


def a_ ( __snake_case : Dict , __snake_case : Any ) -> Tuple:
    """Recursively merge `source` into `destination` (nested dicts merged in place)."""
    for key, value in source.items():
        if isinstance(__snake_case , __snake_case ):
            lowerCamelCase_ =destination.setdefault(__snake_case , {} )
            merge_dicts(__snake_case , __snake_case )
        else:
            lowerCamelCase_ =value
    return destination


def a_ ( __snake_case : int = None ) -> bool:
    """Return True if a TCP port on localhost is already accepting connections.

    Defaults to 29500 (the usual torch distributed rendezvous port) when no
    port is given.
    """
    if port is None:
        lowerCamelCase_ =2_9500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('''localhost''', port) ) == 0
75
'''simple docstring'''
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    TaConfig,
    TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD

# NOTE(review): machine-obfuscated source — assignment targets were rewritten
# to `lowerCamelCase_` while later statements still read the original names
# (`image`, `rename_keys`, `tokenizer`, `hf_model`, ...).  Docstrings describe
# the apparent intent.


def a_ ( ) -> Dict:
    """Download the LAVIS demo image and return it as an RGB PIL image."""
    lowerCamelCase_ ='''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
    lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' )
    return image


def a_ ( __snake_case : Tuple ) -> Dict:
    """Build the (old key, new key) rename list mapping LAVIS state-dict names to HF names."""
    lowerCamelCase_ =[]
    # fmt: off

    # vision encoder
    rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
    rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
    rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
    rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
    rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
    rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )

    for i in range(config.vision_config.num_hidden_layers ):
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )

    # QFormer
    rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
    rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )

    # fmt: on
    return rename_keys


def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : List[Any] ) -> Dict:
    """Move a state-dict entry from its old key to the new key (in place)."""
    lowerCamelCase_ =dct.pop(__snake_case )
    lowerCamelCase_ =val


def a_ ( __snake_case : str , __snake_case : Optional[Any] ) -> Any:
    """Recombine the separate q/v attention biases into a single qkv bias per layer."""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )

        # next, set bias in the state dict; the k bias slot is zero-filled
        lowerCamelCase_ =torch.cat((q_bias, torch.zeros_like(__snake_case , requires_grad=__snake_case ), v_bias) )
        lowerCamelCase_ =qkv_bias


def a_ ( __snake_case : List[str] ) -> Any:
    """Build the InstructBlipConfig for a given checkpoint name.

    COCO-trained checkpoints use a 364px image size, others 224.  The text
    backbone config (flan-T5 or Vicuna/LLaMA) is selected by substring match
    on the model name; unknown names raise ValueError.
    """
    lowerCamelCase_ =364 if '''coco''' in model_name else 224
    lowerCamelCase_ =InstructBlipVisionConfig(image_size=__snake_case ).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2001 ).to_dict()
    elif "vicuna-13b" in model_name:
        lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2001 ).to_dict()
    else:
        raise ValueError('''Model name not supported''' )

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    lowerCamelCase_ =InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
    lowerCamelCase_ =InstructBlipConfig(vision_config=__snake_case , text_config=__snake_case , qformer_config=__snake_case )

    return config, image_size


@torch.no_grad()
def a_ ( __snake_case : Optional[int] , __snake_case : str=None , __snake_case : List[Any]=False ) -> List[str]:
    """Convert a LAVIS InstructBLIP checkpoint to the HF format.

    Loads both models, renames state-dict keys, verifies logits and generation
    agree, then optionally saves and/or pushes the converted model + processor.
    """
    lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
    qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )

    if "t5" in model_name:
        lowerCamelCase_ =TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        lowerCamelCase_ =LlamaTokenizerFast.from_pretrained(
            '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
        tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )

    lowerCamelCase_, lowerCamelCase_ =get_blipa_config(__snake_case )
    lowerCamelCase_ =InstructBlipForConditionalGeneration(__snake_case ).eval()

    lowerCamelCase_ ={
        '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
        '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
        '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
        '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
    }
    lowerCamelCase_, lowerCamelCase_ =model_name_to_original[model_name]

    # load original model
    print('''Loading original model...''' )
    # two separate devices so original and HF model can live on different GPUs
    lowerCamelCase_ ='''cuda:1''' if torch.cuda.is_available() else '''cpu'''
    lowerCamelCase_ ='''cuda:2''' if torch.cuda.is_available() else '''cpu'''
    lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =load_model_and_preprocess(
        name=__snake_case , model_type=__snake_case , is_eval=__snake_case , device=__snake_case )
    original_model.eval()
    print('''Done!''' )

    # update state dict keys
    lowerCamelCase_ =original_model.state_dict()
    lowerCamelCase_ =create_rename_keys(__snake_case )
    for src, dest in rename_keys:
        rename_key(__snake_case , __snake_case , __snake_case )

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        lowerCamelCase_ =state_dict.pop(__snake_case )
        if key.startswith('''Qformer.bert''' ):
            lowerCamelCase_ =key.replace('''Qformer.bert''' , '''qformer''' )
        if "attention.self" in key:
            lowerCamelCase_ =key.replace('''self''' , '''attention''' )
        if "llm_proj" in key:
            lowerCamelCase_ =key.replace('''llm_proj''' , '''language_projection''' )
        if "t5_proj" in key:
            lowerCamelCase_ =key.replace('''t5_proj''' , '''language_projection''' )
        if key.startswith('''llm_model''' ):
            lowerCamelCase_ =key.replace('''llm_model''' , '''language_model''' )
        if key.startswith('''t5''' ):
            lowerCamelCase_ =key.replace('''t5''' , '''language''' )
        lowerCamelCase_ =val

    # read in qv biases
    read_in_q_v_bias(__snake_case , __snake_case )

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(__snake_case , strict=__snake_case )

    lowerCamelCase_ =load_demo_image()
    lowerCamelCase_ ='''What is unusual about this image?'''

    # create processor
    lowerCamelCase_ =BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size} , image_mean=__snake_case , image_std=__snake_case )
    lowerCamelCase_ =InstructBlipProcessor(
        image_processor=__snake_case , tokenizer=__snake_case , qformer_tokenizer=__snake_case , )
    lowerCamelCase_ =processor(images=__snake_case , text=__snake_case , return_tensors='''pt''' ).to(__snake_case )

    # make sure processor creates exact same pixel values
    lowerCamelCase_ =vis_processors['''eval'''](__snake_case ).unsqueeze(0 ).to(__snake_case )
    lowerCamelCase_ =inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __snake_case )

    original_model.to(__snake_case )
    hf_model.to(__snake_case )
    with torch.no_grad():
        if "vicuna" in model_name:
            lowerCamelCase_ =original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
            lowerCamelCase_ =hf_model(**__snake_case ).logits
        else:
            # T5 is seq2seq: feed a "\n" target so both models produce decoder logits
            lowerCamelCase_ =original_model(
                {'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
            lowerCamelCase_ =tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(__snake_case )
            lowerCamelCase_ =label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
            lowerCamelCase_ =hf_model(**__snake_case , labels=__snake_case ).logits

    print('''First values of original logits:''' , original_logits[0, :3, :3] )
    print('''First values of HF logits:''' , logits[0, :3, :3] )

    # assert values
    assert original_logits.shape == logits.shape
    lowerCamelCase_ =1e-4 if '''vicuna''' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device ) , __snake_case , atol=__snake_case )
    print('''Looks ok!''' )

    print('''Generating with original model...''' )
    lowerCamelCase_ =original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )

    # important: we need to cast the weights of the HF model to the appropriate type
    print('''Generating with HF model...''' )
    lowerCamelCase_ =hf_model.generate(
        **__snake_case ,
        do_sample=__snake_case ,
        num_beams=5 ,
        max_length=256 ,
        min_length=1 ,
        top_p=0.9 ,
        repetition_penalty=1.5 ,
        length_penalty=1.0 ,
        temperature=1 ,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        lowerCamelCase_ =2
    print('''Original generation:''' , __snake_case )
    lowerCamelCase_ =processor.batch_decode(__snake_case , skip_special_tokens=__snake_case )
    lowerCamelCase_ =[text.strip() for text in output_text]
    print('''HF generation:''' , __snake_case )

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(__snake_case )
        hf_model.save_pretrained(__snake_case )

    if push_to_hub:
        processor.push_to_hub(F'''Salesforce/{model_name}''' )
        hf_model.push_to_hub(F'''Salesforce/{model_name}''' )


if __name__ == "__main__":
    a_ : Any = argparse.ArgumentParser()
    a_ : Any = [
        """instructblip-vicuna-7b""",
        """instructblip-vicuna-13b""",
        """instructblip-flan-t5-xl""",
        """instructblip-flan-t5-xxl""",
    ]
    parser.add_argument(
        """--model_name""",
        default="""instructblip-flan-t5-xl""",
        choices=choices,
        type=str,
        help="""Path to hf config.json of model to convert""",
    )
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub after converting""",
    )

    a_ : str = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
75
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Lazy-import table for the XLM model family: config/tokenizer names are always
# available; the PyTorch and TensorFlow modeling symbols are only registered
# when the corresponding backend is installed.
a_ : Dict = {
    """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
    """tokenization_xlm""": ["""XLMTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): obfuscation replaced the `_import_structure[...]` targets
    # with bare `a_` names — confirm against the upstream transformers file.
    a_ : List[Any] = [
        """XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XLMForMultipleChoice""",
        """XLMForQuestionAnswering""",
        """XLMForQuestionAnsweringSimple""",
        """XLMForSequenceClassification""",
        """XLMForTokenClassification""",
        """XLMModel""",
        """XLMPreTrainedModel""",
        """XLMWithLMHeadModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : str = [
        """TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFXLMForMultipleChoice""",
        """TFXLMForQuestionAnsweringSimple""",
        """TFXLMForSequenceClassification""",
        """TFXLMForTokenClassification""",
        """TFXLMMainLayer""",
        """TFXLMModel""",
        """TFXLMPreTrainedModel""",
        """TFXLMWithLMHeadModel""",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module is lazy.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    a_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
75
'''simple docstring'''
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np

# NOTE(review): obfuscated source — the base class below (`lowerCamelCase__`)
# was presumably `Protocol`, and the `a_` functions read names like
# `fft_results`, `samplerate`, `filter_type` that the rewritten
# `lowerCamelCase_` assignments no longer bind.  Confirm against the original.


class __UpperCamelCase ( lowerCamelCase__ ):
    def lowercase__ ( self, lowerCAmelCase ):
        """Process one sample; the protocol's default implementation returns 0.0."""
        return 0.0


def a_ ( __snake_case : np.ndarray , __snake_case : int ) -> tuple[int | float, int | float]:
    """Return (lowest, highest) dB bounds of an FFT result, clamped to at least [-20, 20].

    Only bins 1 .. samplerate//2 - 1 (below Nyquist, excluding DC) are considered.
    """
    lowerCamelCase_ =min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    lowerCamelCase_ =max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest


def a_ ( __snake_case : FilterType , __snake_case : int ) -> None:
    """Plot the filter's frequency (gain) response from its impulse response.

    Feeds a unit impulse of length 512 through the filter, zero-pads to one
    second, FFTs, converts to dB and plots gain on a log-frequency axis.
    """
    lowerCamelCase_ =512
    # unit impulse: [1, 0, 0, ...]
    lowerCamelCase_ =[1] + [0] * (size - 1)
    lowerCamelCase_ =[filter_type.process(__snake_case ) for item in inputs]

    lowerCamelCase_ =[0] * (samplerate - size)  # zero-padding
    outputs += filler
    lowerCamelCase_ =np.abs(np.fft.fft(__snake_case ) )
    # magnitude -> decibels (np.logaa is the obfuscated np.log10 — TODO confirm)
    lowerCamelCase_ =20 * np.logaa(__snake_case )

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )

    # Display within reasonable bounds
    lowerCamelCase_ =get_bounds(__snake_case , __snake_case )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('''Gain (dB)''' )

    plt.plot(__snake_case )
    plt.show()


def a_ ( __snake_case : FilterType , __snake_case : int ) -> None:
    """Plot the filter's phase response from its impulse response.

    Same impulse/zero-pad/FFT pipeline as the gain plot, but plots the
    unwrapped phase angle instead of the magnitude.
    """
    lowerCamelCase_ =512
    lowerCamelCase_ =[1] + [0] * (size - 1)
    lowerCamelCase_ =[filter_type.process(__snake_case ) for item in inputs]

    lowerCamelCase_ =[0] * (samplerate - size)  # zero-padding
    outputs += filler
    lowerCamelCase_ =np.angle(np.fft.fft(__snake_case ) )

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )

    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('''Phase shift (Radians)''' )
    plt.plot(np.unwrap(__snake_case , -2 * pi ) )
    plt.show()
75
1
'''simple docstring'''
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor

# NOTE(review): machine-obfuscated test file — both classes were renamed to
# `__UpperCamelCase`, all test methods to `lowercase__` (so later defs shadow
# earlier ones), and assignment targets to `lowerCamelCase_` while later code
# still reads the original names.  Confirm against the upstream transformers
# DETA image-processing tests.


class __UpperCamelCase ( unittest.TestCase ):
    # Helper that generates processor kwargs and expected output sizes.
    def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=3, lowerCAmelCase=30, lowerCAmelCase=400, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=[0.5, 0.5, 0.5], lowerCAmelCase=[0.5, 0.5, 0.5], lowerCAmelCase=True, lowerCAmelCase=1 / 255, lowerCAmelCase=True, ):
        """Store the image-processing parameters used by the tests."""
        lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
        lowerCamelCase_ =parent
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =min_resolution
        lowerCamelCase_ =max_resolution
        lowerCamelCase_ =do_resize
        lowerCamelCase_ =size
        lowerCamelCase_ =do_normalize
        lowerCamelCase_ =image_mean
        lowerCamelCase_ =image_std
        lowerCamelCase_ =do_rescale
        lowerCamelCase_ =rescale_factor
        lowerCamelCase_ =do_pad

    def lowercase__ ( self ):
        """Return the kwargs dict used to construct a DetaImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False ):
        """Compute the (height, width) the processor is expected to produce.

        For a single image, scales the shortest edge to `size["shortest_edge"]`
        keeping the aspect ratio; for a batch, returns the per-axis maximum
        over the individual expected sizes (padding target).
        """
        if not batched:
            lowerCamelCase_ =image_inputs[0]
            if isinstance(lowerCAmelCase, Image.Image ):
                lowerCamelCase_, lowerCamelCase_ =image.size
            else:
                lowerCamelCase_, lowerCamelCase_ =image.shape[1], image.shape[2]
            if w < h:
                lowerCamelCase_ =int(self.size['''shortest_edge'''] * h / w )
                lowerCamelCase_ =self.size['''shortest_edge''']
            elif w > h:
                lowerCamelCase_ =self.size['''shortest_edge''']
                lowerCamelCase_ =int(self.size['''shortest_edge'''] * w / h )
            else:
                lowerCamelCase_ =self.size['''shortest_edge''']
                lowerCamelCase_ =self.size['''shortest_edge''']
        else:
            lowerCamelCase_ =[]
            for image in image_inputs:
                lowerCamelCase_, lowerCamelCase_ =self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            lowerCamelCase_ =max(lowerCAmelCase, key=lambda lowerCAmelCase : item[0] )[0]
            lowerCamelCase_ =max(lowerCAmelCase, key=lambda lowerCAmelCase : item[1] )[1]

        return expected_height, expected_width


@require_torch
@require_vision
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
    lowercase : List[str] =DetaImageProcessor if is_vision_available() else None

    def lowercase__ ( self ):
        """Create the shared tester fixture."""
        lowerCamelCase_ =DetaImageProcessingTester(self )

    @property
    def lowercase__ ( self ):
        """Processor kwargs from the tester fixture."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowercase__ ( self ):
        """The processor exposes all expected configuration attributes."""
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase, '''image_mean''' ) )
        self.assertTrue(hasattr(lowerCAmelCase, '''image_std''' ) )
        self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase, '''do_resize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase, '''do_rescale''' ) )
        self.assertTrue(hasattr(lowerCAmelCase, '''do_pad''' ) )
        self.assertTrue(hasattr(lowerCAmelCase, '''size''' ) )

    def lowercase__ ( self ):
        """from_dict restores size and do_pad."""
        lowerCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
        self.assertEqual(image_processor.do_pad, lowerCAmelCase )

    def lowercase__ ( self ):
        """Intentionally empty placeholder (kept for mixin API parity)."""
        pass

    def lowercase__ ( self ):
        """Processing PIL images yields tensors of the expected shape."""
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase, Image.Image )

        # Test not batched input
        lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase )
        lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def lowercase__ ( self ):
        """Processing numpy arrays yields tensors of the expected shape."""
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, numpify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase, np.ndarray )

        # Test not batched input
        lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def lowercase__ ( self ):
        """Processing torch tensors yields tensors of the expected shape."""
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, torchify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase, torch.Tensor )

        # Test not batched input
        lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    @slow
    def lowercase__ ( self ):
        """Integration test: COCO detection annotations produce the reference targets."""
        lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''', '''r''' ) as f:
            lowerCamelCase_ =json.loads(f.read() )

        lowerCamelCase_ ={'''image_id''': 39_769, '''annotations''': target}

        # encode them
        lowerCamelCase_ =DetaImageProcessor()
        lowerCamelCase_ =image_processing(images=lowerCAmelCase, annotations=lowerCAmelCase, return_tensors='''pt''' )

        # verify pixel values
        lowerCamelCase_ =torch.Size([1, 3, 800, 1_066] )
        self.assertEqual(encoding['''pixel_values'''].shape, lowerCAmelCase )

        lowerCamelCase_ =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], lowerCAmelCase, atol=1e-4 ) )

        # verify area
        lowerCamelCase_ =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], lowerCAmelCase ) )
        # verify boxes
        lowerCamelCase_ =torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, lowerCAmelCase )
        lowerCamelCase_ =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], lowerCAmelCase, atol=1e-3 ) )
        # verify image_id
        lowerCamelCase_ =torch.tensor([39_769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], lowerCAmelCase ) )
        # verify is_crowd
        lowerCamelCase_ =torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], lowerCAmelCase ) )
        # verify class_labels
        lowerCamelCase_ =torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], lowerCAmelCase ) )
        # verify orig_size
        lowerCamelCase_ =torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], lowerCAmelCase ) )
        # verify size
        lowerCamelCase_ =torch.tensor([800, 1_066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], lowerCAmelCase ) )

    @slow
    def lowercase__ ( self ):
        """Integration test: COCO panoptic annotations produce the reference targets."""
        lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''' ) as f:
            lowerCamelCase_ =json.loads(f.read() )

        lowerCamelCase_ ={'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}

        lowerCamelCase_ =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )

        # encode them
        lowerCamelCase_ =DetaImageProcessor(format='''coco_panoptic''' )
        lowerCamelCase_ =image_processing(images=lowerCAmelCase, annotations=lowerCAmelCase, masks_path=lowerCAmelCase, return_tensors='''pt''' )

        # verify pixel values
        lowerCamelCase_ =torch.Size([1, 3, 800, 1_066] )
        self.assertEqual(encoding['''pixel_values'''].shape, lowerCAmelCase )

        lowerCamelCase_ =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], lowerCAmelCase, atol=1e-4 ) )

        # verify area
        lowerCamelCase_ =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], lowerCAmelCase ) )
        # verify boxes
        lowerCamelCase_ =torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, lowerCAmelCase )
        lowerCamelCase_ =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], lowerCAmelCase, atol=1e-3 ) )
        # verify image_id
        lowerCamelCase_ =torch.tensor([39_769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], lowerCAmelCase ) )
        # verify is_crowd
        lowerCamelCase_ =torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], lowerCAmelCase ) )
        # verify class_labels
        lowerCamelCase_ =torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], lowerCAmelCase ) )
        # verify masks
        lowerCamelCase_ =822_873
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), lowerCAmelCase )
        # verify orig_size
        lowerCamelCase_ =torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], lowerCAmelCase ) )
        # verify size
        lowerCamelCase_ =torch.tensor([800, 1_066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], lowerCAmelCase ) )
75
"""Tests for the Funnel Transformer tokenizers (slow and fast)."""
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class __UpperCamelCase(TokenizerTesterMixin, unittest.TestCase):
    # Hooks consumed by TokenizerTesterMixin's shared test suite.
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a tiny WordPiece vocab file into the test's tmp dir."""
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        # self.vocab_file is read by test_full_tokenizer below.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **lowerCAmelCase):
        """Return a slow tokenizer loaded from the tmp vocab."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase)

    def get_rust_tokenizer(self, **lowerCAmelCase):
        """Return a fast tokenizer loaded from the tmp vocab."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase)

    def get_input_output_texts(self, lowerCAmelCase):
        """Return a (raw text, expected detokenized text) pair for the mixin."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and token->id conversion against the tiny vocab."""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        """Funnel uses token type 2 for the leading <cls> token."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(
                inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len
            )
75
1
"""Lazy import structure for the DeiT model family (config, image processing, PT/TF modeling)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# submodule name -> public symbols; consumed by _LazyModule at the bottom.
_import_structure = {
    "configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy so attribute access imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
"""Convert a T5X Pix2Struct checkpoint into the Hugging Face PyTorch format."""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints

from transformers import (
    AutoTokenizer,
    PixaStructConfig,
    PixaStructForConditionalGeneration,
    PixaStructImageProcessor,
    PixaStructProcessor,
    PixaStructTextConfig,
    PixaStructVisionConfig,
)


def a_(tax_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat dict."""
    flax_params = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


# Kept as an alias: the converter below calls this name.
get_flax_param = a_


def rename_and_convert_flax_params(flax_dict):
    """Rename flat T5X parameter keys to HF names and convert values to torch tensors.

    Linear weights are transposed (T5X stores them as (in, out)); embeddings are not.
    """
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict


def convert_pixastruct_original_pytorch_checkpoint_to_hf(
    tax_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Build a HF Pix2Struct model from a T5X checkpoint and save model + processor."""
    flax_params = get_flax_param(tax_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)

    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )
    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
    # NOTE(review): set unconditionally in the source layout; confirm whether this
    # should instead be gated on the `is_vqa` flag.
    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
75
1
"""Convert a decimal integer to its hexadecimal string representation (like built-in hex())."""

# Digit lookup table used by a_ below; indexed by remainder mod 16.
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def a_(decimal: float) -> str:
    """Return *decimal* as a hex string, e.g. 37 -> '0x25', -256 -> '-0x100'.

    Accepts ints and whole-valued floats; negative values get a leading '-'.
    Raises AssertionError for non-integral input (type() check deliberately
    excludes bool, which isinstance would accept).
    """
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    # Edge case: input 0 never enters the loop; match hex(0) == '0x0'.
    if not hexadecimal:
        hexadecimal = "0"
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
"""CLIP-style image processor: resize -> center crop -> rescale -> normalize."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


a_ = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class __UpperCamelCase(BaseImageProcessor):
    """Image processor with the CLIP preprocessing pipeline.

    Defaults: shortest-edge resize to 224, 224x224 center crop, 1/255 rescale,
    OpenAI CLIP mean/std normalization, RGBA->RGB conversion.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so the shortest edge matches size['shortest_edge'], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # Bare `resize` resolves to the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to (size['height'], size['width'])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize with per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline on one image or a batch; returns a BatchFeature
        with 'pixel_values'. Each argument falls back to the instance default."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
75
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModel.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModel.from_pretrained(lowerCAmelCase, 
from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForPreTraining.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =TFAutoModelForCausalLM.from_pretrained( lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForCausalLM.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =AutoModelForCausalLM.from_pretrained( lowerCAmelCase, output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ 
=TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =TFAutoModelForMaskedLM.from_pretrained( lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForMaskedLM.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =AutoModelForMaskedLM.from_pretrained( lowerCAmelCase, output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained( lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase, 
from_tf=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =AutoModelForSeqaSeqLM.from_pretrained( lowerCAmelCase, output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) self.assertEqual(model.num_parameters(), 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ), 14_410 ) lowerCamelCase_ =AutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) 
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) self.assertEqual(model.num_parameters(), 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ), 14_410 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) self.assertEqual(model.num_parameters(), 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ), 14_410 ) lowerCamelCase_ =AutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) self.assertEqual(model.num_parameters(), 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase ), 14_410 )
75
'''simple docstring''' from __future__ import annotations def a_ ( __snake_case : str , __snake_case : list[str] | None = None , __snake_case : dict[str, float] | None = None , __snake_case : bool = False , ) -> tuple[int, float, str]: """simple docstring""" lowerCamelCase_ =cipher_alphabet or [chr(__snake_case ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) lowerCamelCase_ ={ '''a''': 0.0_8_4_9_7, '''b''': 0.0_1_4_9_2, '''c''': 0.0_2_2_0_2, '''d''': 0.0_4_2_5_3, '''e''': 0.1_1_1_6_2, '''f''': 0.0_2_2_2_8, '''g''': 0.0_2_0_1_5, '''h''': 0.0_6_0_9_4, '''i''': 0.0_7_5_4_6, '''j''': 0.0_0_1_5_3, '''k''': 0.0_1_2_9_2, '''l''': 0.0_4_0_2_5, '''m''': 0.0_2_4_0_6, '''n''': 0.0_6_7_4_9, '''o''': 0.0_7_5_0_7, '''p''': 0.0_1_9_2_9, '''q''': 0.0_0_0_9_5, '''r''': 0.0_7_5_8_7, '''s''': 0.0_6_3_2_7, '''t''': 0.0_9_3_5_6, '''u''': 0.0_2_7_5_8, '''v''': 0.0_0_9_7_8, '''w''': 0.0_2_5_6_0, '''x''': 0.0_0_1_5_0, '''y''': 0.0_1_9_9_4, '''z''': 0.0_0_0_7_7, } else: # Custom frequencies dictionary lowerCamelCase_ =frequencies_dict if not case_sensitive: lowerCamelCase_ =ciphertext.lower() # Chi squared statistic values lowerCamelCase_ ={} # cycle through all of the shifts for shift in range(len(__snake_case ) ): lowerCamelCase_ ='''''' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet lowerCamelCase_ =(alphabet_letters.index(letter.lower() ) - shift) % len( __snake_case ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter lowerCamelCase_ =0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: lowerCamelCase_ =letter.lower() if letter in 
frequencies: # Get the amount of times the letter occurs in the message lowerCamelCase_ =decrypted_with_shift.lower().count(__snake_case ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCamelCase_ =frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCamelCase_ =((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message lowerCamelCase_ =decrypted_with_shift.count(__snake_case ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCamelCase_ =frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCamelCase_ =((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary lowerCamelCase_ =( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(__snake_case : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] lowerCamelCase_ =min( __snake_case , key=__snake_case , ) # Get all the data from the most likely cipher (key, decoded message) ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
75
1
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor a_ : Union[str, Any] = random.Random() def a_ ( __snake_case : int , __snake_case : int=1.0 , __snake_case : Tuple=None , __snake_case : Union[str, Any]=None ) -> str: """simple docstring""" if rng is None: lowerCamelCase_ =global_rng lowerCamelCase_ =[] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __UpperCamelCase ( unittest.TestCase ): def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=400, lowerCAmelCase=2_000, lowerCAmelCase=24, lowerCAmelCase=24, lowerCAmelCase=0.0, lowerCAmelCase=16_000, lowerCAmelCase=True, lowerCAmelCase=True, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =min_seq_length lowerCamelCase_ =max_seq_length lowerCamelCase_ =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCamelCase_ =feature_size lowerCamelCase_ =num_mel_bins lowerCamelCase_ =padding_value lowerCamelCase_ =sampling_rate lowerCamelCase_ =return_attention_mask lowerCamelCase_ =do_normalize def lowercase__ ( self ): """simple docstring""" return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase__ ( self, lowerCAmelCase=False, lowerCAmelCase=False ): """simple docstring""" def _flatten(lowerCAmelCase ): return list(itertools.chain(*lowerCAmelCase ) ) if equal_length: lowerCamelCase_ 
=[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowerCamelCase_ =[ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: lowerCamelCase_ =[np.asarray(lowerCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Any =SpeechaTextFeatureExtractor if is_speech_available() else None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextFeatureExtractionTester(self ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" self.assertTrue(np.all(np.mean(lowerCAmelCase, axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase, axis=0 ) - 1 ) < 1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =[np.asarray(lowerCAmelCase ) for speech_input in speech_inputs] # Test feature size lowerCamelCase_ =feature_extractor(lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input lowerCamelCase_ =feature_extractor(speech_inputs[0], return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(np_speech_inputs[0], return_tensors='''np''' ).input_features self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) # Test batched lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features for enc_seq_a, 
enc_seq_a in zip(lowerCAmelCase, lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. lowerCamelCase_ =[floats_list((1, x) )[0] for x in (800, 800, 800)] lowerCamelCase_ =np.asarray(lowerCAmelCase ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase, lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =['''longest''', '''max_length''', '''do_not_pad'''] lowerCamelCase_ =[None, 16, None] for max_length, padding in zip(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_attention_mask=lowerCAmelCase ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =[np.sum(lowerCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =['''longest''', '''max_length''', '''do_not_pad'''] lowerCamelCase_ =[None, 16, None] for max_length, padding in zip(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =feature_extractor( lowerCAmelCase, 
max_length=lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =[np.sum(lowerCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''max_length''', max_length=4, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''longest''', max_length=4, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : 
fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 4, 24) ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''longest''', max_length=16, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 6, 24) ) def lowercase__ ( self ): """simple docstring""" import torch lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =np.random.rand(100, 32 ).astype(np.floataa ) lowerCamelCase_ =np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowerCamelCase_ =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) lowerCamelCase_ =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" from datasets import load_dataset lowerCamelCase_ =load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech lowerCamelCase_ =ds.sort('''id''' ).select(range(lowerCAmelCase ) 
)[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =np.array([ -1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1, -1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8, -1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5, ] ) # fmt: on lowerCamelCase_ =self._load_datasamples(1 ) lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''pt''' ).input_features self.assertEquals(input_features.shape, (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30], lowerCAmelCase, atol=1e-4 ) )
75
'''simple docstring''' import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging a_ : List[Any] = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) a_ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def a_ ( ) -> List[str]: """simple docstring""" lowerCamelCase_ ='''https://pypi.org/pypi/diffusers/json''' lowerCamelCase_ =json.loads(request.urlopen(__snake_case ).read() )['''releases'''].keys() return sorted(__snake_case , key=lambda __snake_case : version.Version(__snake_case ) ) def a_ ( ) -> str: """simple docstring""" # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(__snake_case ) os.makedirs(__snake_case , exist_ok=__snake_case ) lowerCamelCase_ =Path(__snake_case ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def a_ ( __snake_case : Union[str, os.PathLike] ) -> List[str]: """simple docstring""" init_hf_modules() lowerCamelCase_ =Path(__snake_case ) / name # If the parent module does not exist yet, recursively create it. 
if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(__snake_case , exist_ok=__snake_case ) lowerCamelCase_ =dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def a_ ( __snake_case : Tuple ) -> List[str]: """simple docstring""" with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f: lowerCamelCase_ =f.read() # Imports of the form `import .xxx` lowerCamelCase_ =re.findall('''^\s*import\s+\.(\S+)\s*$''' , __snake_case , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __snake_case , flags=re.MULTILINE ) # Unique-ify return list(set(__snake_case ) ) def a_ ( __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =False lowerCamelCase_ =[module_file] lowerCamelCase_ =[] # Let's recurse through all relative imports while not no_change: lowerCamelCase_ =[] for f in files_to_check: new_imports.extend(get_relative_imports(__snake_case ) ) lowerCamelCase_ =Path(__snake_case ).parent lowerCamelCase_ =[str(module_path / m ) for m in new_imports] lowerCamelCase_ =[f for f in new_import_files if f not in all_relative_imports] lowerCamelCase_ =[F'''{f}.py''' for f in new_import_files] lowerCamelCase_ =len(__snake_case ) == 0 all_relative_imports.extend(__snake_case ) return all_relative_imports def a_ ( __snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f: lowerCamelCase_ =f.read() # Imports of the form `import xxx` lowerCamelCase_ =re.findall('''^\s*import\s+(\S+)\s*$''' , __snake_case , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __snake_case , flags=re.MULTILINE ) # Only keep the top-level module lowerCamelCase_ =[imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all 
lowerCamelCase_ =list(set(__snake_case ) ) lowerCamelCase_ =[] for imp in imports: try: importlib.import_module(__snake_case ) except ImportError: missing_packages.append(__snake_case ) if len(__snake_case ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' F'''{', '.join(__snake_case )}. Run `pip install {' '.join(__snake_case )}`''' ) return get_relative_imports(__snake_case ) def a_ ( __snake_case : Tuple , __snake_case : Tuple ) -> List[Any]: """simple docstring""" lowerCamelCase_ =module_path.replace(os.path.sep , '''.''' ) lowerCamelCase_ =importlib.import_module(__snake_case ) if class_name is None: return find_pipeline_class(__snake_case ) return getattr(__snake_case , __snake_case ) def a_ ( __snake_case : Dict ) -> Any: """simple docstring""" from ..pipelines import DiffusionPipeline lowerCamelCase_ =dict(inspect.getmembers(__snake_case , inspect.isclass ) ) lowerCamelCase_ =None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , __snake_case ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:''' F''' {pipeline_class.__name__}, and {cls_name}. 
Please make sure to define only one in''' F''' {loaded_module}.''' ) lowerCamelCase_ =cls return pipeline_class def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : str , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =str(__snake_case ) lowerCamelCase_ =os.path.join(__snake_case , __snake_case ) if os.path.isfile(__snake_case ): lowerCamelCase_ =module_file_or_url lowerCamelCase_ ='''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: lowerCamelCase_ =get_diffusers_versions() # cut ".dev0" lowerCamelCase_ ='''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: lowerCamelCase_ =latest_version if latest_version[1:] in available_versions else '''main''' logger.info(F'''Defaulting to latest_version: {revision}.''' ) elif revision in available_versions: lowerCamelCase_ =F'''v{revision}''' elif revision == "main": lowerCamelCase_ =revision else: raise ValueError( F'''`custom_revision`: {revision} does not exist. 
Please make sure to choose one of''' F''' {', '.join(available_versions + ['main'] )}.''' ) # community pipeline on GitHub lowerCamelCase_ =COMMUNITY_PIPELINES_URL.format(revision=__snake_case , pipeline=__snake_case ) try: lowerCamelCase_ =cached_download( __snake_case , cache_dir=__snake_case , force_download=__snake_case , proxies=__snake_case , resume_download=__snake_case , local_files_only=__snake_case , use_auth_token=__snake_case , ) lowerCamelCase_ ='''git''' lowerCamelCase_ =pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' ) raise else: try: # Load from URL or cache if already cached lowerCamelCase_ =hf_hub_download( __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , proxies=__snake_case , resume_download=__snake_case , local_files_only=__snake_case , use_auth_token=__snake_case , ) lowerCamelCase_ =os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' ) raise # Check we have all the requirements in our environment lowerCamelCase_ =check_imports(__snake_case ) # Now we move the module inside our cached dynamic modules. lowerCamelCase_ =DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(__snake_case ) lowerCamelCase_ =Path(__snake_case ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. 
shutil.copy(__snake_case , submodule_path / module_file ) for module_needed in modules_needed: lowerCamelCase_ =F'''{module_needed}.py''' shutil.copy(os.path.join(__snake_case , __snake_case ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =use_auth_token elif use_auth_token is True: lowerCamelCase_ =HfFolder.get_token() else: lowerCamelCase_ =None lowerCamelCase_ =model_info(__snake_case , revision=__snake_case , token=__snake_case ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. lowerCamelCase_ =submodule_path / commit_hash lowerCamelCase_ =full_submodule + os.path.sep + commit_hash create_dynamic_module(__snake_case ) if not (submodule_path / module_file).exists(): shutil.copy(__snake_case , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( __snake_case , F'''{module_needed}.py''' , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , ) return os.path.join(__snake_case , __snake_case ) def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : str , __snake_case : Optional[str] = None , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : Optional[int] , ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =get_cached_module_file( __snake_case , __snake_case , 
cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , ) return get_class_in_module(__snake_case , final_module.replace('''.py''' , '''''' ) )
75
1
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker a_ : Dict = """CompVis/stable-diffusion-v1-1""" a_ : str = """CompVis/stable-diffusion-v1-2""" a_ : Any = """CompVis/stable-diffusion-v1-3""" a_ : str = """CompVis/stable-diffusion-v1-4""" class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = True, ): """simple docstring""" super()._init_() lowerCamelCase_ =StableDiffusionPipeline.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =StableDiffusionPipeline.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =StableDiffusionPipeline.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =StableDiffusionPipeline( vae=lowerCAmelCase, text_encoder=lowerCAmelCase, tokenizer=lowerCAmelCase, unet=lowerCAmelCase, scheduler=lowerCAmelCase, safety_checker=lowerCAmelCase, feature_extractor=lowerCAmelCase, requires_safety_checker=lowerCAmelCase, ) self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea ) @property def lowercase__ ( self ): """simple docstring""" return {k: getattr(self, lowerCAmelCase ) for k in self.config.keys() if not k.startswith('''_''' )} def lowercase__ ( self, lowerCAmelCase = "auto" ): """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCamelCase_ =self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase ) def 
lowercase__ ( self ): """simple docstring""" self.enable_attention_slicing(lowerCAmelCase ) @torch.no_grad() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = 512, lowerCAmelCase = 512, lowerCAmelCase = 50, lowerCAmelCase = 7.5, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 1, **lowerCAmelCase, ): """simple docstring""" return self.pipea( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) @torch.no_grad() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = 512, lowerCAmelCase = 512, lowerCAmelCase = 50, lowerCAmelCase = 7.5, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 1, **lowerCAmelCase, ): """simple docstring""" return self.pipea( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) @torch.no_grad() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = 512, lowerCAmelCase = 512, lowerCAmelCase = 50, lowerCAmelCase = 7.5, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 
"pil", lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 1, **lowerCAmelCase, ): """simple docstring""" return self.pipea( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) @torch.no_grad() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = 512, lowerCAmelCase = 512, lowerCAmelCase = 50, lowerCAmelCase = 7.5, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 1, **lowerCAmelCase, ): """simple docstring""" return self.pipea( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) @torch.no_grad() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = 512, lowerCAmelCase = 512, lowerCAmelCase = 50, lowerCAmelCase = 7.5, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 1, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ ='''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(lowerCAmelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` must be 
divisible by 8 but are {height} and {width}.''' ) # Get first result from Stable Diffusion Checkpoint v1.1 lowerCamelCase_ =self.textaimg_sda_a( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) # Get first result from Stable Diffusion Checkpoint v1.2 lowerCamelCase_ =self.textaimg_sda_a( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) # Get first result from Stable Diffusion Checkpoint v1.3 lowerCamelCase_ =self.textaimg_sda_a( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) # Get first result from Stable Diffusion Checkpoint v1.4 lowerCamelCase_ =self.textaimg_sda_a( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, 
callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
75
'''simple docstring''' a_ : Any = [ 9_99, 8_00, 7_99, 6_00, 5_99, 5_00, 4_00, 3_99, 3_77, 3_55, 3_33, 3_11, 2_88, 2_66, 2_44, 2_22, 2_00, 1_99, 1_77, 1_55, 1_33, 1_11, 88, 66, 44, 22, 0, ] a_ : Any = [ 9_99, 9_76, 9_52, 9_28, 9_05, 8_82, 8_58, 8_57, 8_10, 7_62, 7_15, 7_14, 5_72, 4_29, 4_28, 2_86, 2_85, 2_38, 1_90, 1_43, 1_42, 1_18, 95, 71, 47, 24, 0, ] a_ : Optional[Any] = [ 9_99, 9_88, 9_77, 9_66, 9_55, 9_44, 9_33, 9_22, 9_11, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_50, 3_00, 2_99, 2_66, 2_33, 2_00, 1_99, 1_79, 1_59, 1_40, 1_20, 1_00, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] a_ : str = [ 9_99, 9_95, 9_92, 9_89, 9_85, 9_81, 9_78, 9_75, 9_71, 9_67, 9_64, 9_61, 9_57, 9_56, 9_51, 9_47, 9_42, 9_37, 9_33, 9_28, 9_23, 9_19, 9_14, 9_13, 9_08, 9_03, 8_97, 8_92, 8_87, 8_81, 8_76, 8_71, 8_70, 8_64, 8_58, 8_52, 8_46, 8_40, 8_34, 8_28, 8_27, 8_20, 8_13, 8_06, 7_99, 7_92, 7_85, 7_84, 7_77, 7_70, 7_63, 7_56, 7_49, 7_42, 7_41, 7_33, 7_24, 7_16, 7_07, 6_99, 6_98, 6_88, 6_77, 6_66, 6_56, 6_55, 6_45, 6_34, 6_23, 6_13, 6_12, 5_98, 5_84, 5_70, 5_69, 5_55, 5_41, 5_27, 5_26, 5_05, 4_84, 4_83, 4_62, 4_40, 4_39, 3_96, 3_95, 3_52, 3_51, 3_08, 3_07, 2_64, 2_63, 2_20, 2_19, 1_76, 1_32, 88, 44, 0, ] a_ : Optional[int] = [ 9_99, 9_97, 9_95, 9_92, 9_90, 9_88, 9_86, 9_84, 9_81, 9_79, 9_77, 9_75, 9_72, 9_70, 9_68, 9_66, 9_64, 9_61, 9_59, 9_57, 9_56, 9_54, 9_51, 9_49, 9_46, 9_44, 9_41, 9_39, 9_36, 9_34, 9_31, 9_29, 9_26, 9_24, 9_21, 9_19, 9_16, 9_14, 9_13, 9_10, 9_07, 9_05, 9_02, 8_99, 8_96, 8_93, 8_91, 8_88, 8_85, 8_82, 8_79, 8_77, 8_74, 8_71, 8_70, 8_67, 8_64, 8_61, 8_58, 8_55, 8_52, 8_49, 8_46, 8_43, 8_40, 8_37, 8_34, 8_31, 8_28, 8_27, 8_24, 8_21, 8_17, 8_14, 8_11, 8_08, 8_04, 8_01, 7_98, 7_95, 7_91, 7_88, 7_85, 7_84, 7_80, 7_77, 7_74, 7_70, 7_66, 7_63, 7_60, 7_56, 7_52, 7_49, 7_46, 7_42, 7_41, 7_37, 7_33, 7_30, 7_26, 7_22, 7_18, 7_14, 7_10, 7_07, 7_03, 6_99, 6_98, 6_94, 6_90, 6_85, 6_81, 6_77, 6_73, 6_69, 6_64, 6_60, 
6_56, 6_55, 6_50, 6_46, 6_41, 6_36, 6_32, 6_27, 6_22, 6_18, 6_13, 6_12, 6_07, 6_02, 5_96, 5_91, 5_86, 5_80, 5_75, 5_70, 5_69, 5_63, 5_57, 5_51, 5_45, 5_39, 5_33, 5_27, 5_26, 5_19, 5_12, 5_05, 4_98, 4_91, 4_84, 4_83, 4_74, 4_66, 4_57, 4_49, 4_40, 4_39, 4_28, 4_18, 4_07, 3_96, 3_95, 3_81, 3_66, 3_52, 3_51, 3_30, 3_08, 3_07, 2_86, 2_64, 2_63, 2_42, 2_20, 2_19, 1_76, 1_75, 1_32, 1_31, 88, 44, 0, ] a_ : Dict = [ 9_99, 9_91, 9_82, 9_74, 9_66, 9_58, 9_50, 9_41, 9_33, 9_25, 9_16, 9_08, 9_00, 8_99, 8_74, 8_50, 8_25, 8_00, 7_99, 7_00, 6_00, 5_00, 4_00, 3_00, 2_00, 1_00, 0, ] a_ : Tuple = [ 9_99, 9_92, 9_85, 9_78, 9_71, 9_64, 9_57, 9_49, 9_42, 9_35, 9_28, 9_21, 9_14, 9_07, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_00, 2_99, 2_00, 1_99, 1_00, 99, 0, ] a_ : Any = [ 9_99, 9_96, 9_92, 9_89, 9_85, 9_82, 9_79, 9_75, 9_72, 9_68, 9_65, 9_61, 9_58, 9_55, 9_51, 9_48, 9_44, 9_41, 9_38, 9_34, 9_31, 9_27, 9_24, 9_20, 9_17, 9_14, 9_10, 9_07, 9_03, 9_00, 8_99, 8_91, 8_84, 8_76, 8_69, 8_61, 8_53, 8_46, 8_38, 8_30, 8_23, 8_15, 8_08, 8_00, 7_99, 7_88, 7_77, 7_66, 7_55, 7_44, 7_33, 7_22, 7_11, 7_00, 6_99, 6_88, 6_77, 6_66, 6_55, 6_44, 6_33, 6_22, 6_11, 6_00, 5_99, 5_85, 5_71, 5_57, 5_42, 5_28, 5_14, 5_00, 4_99, 4_85, 4_71, 4_57, 4_42, 4_28, 4_14, 4_00, 3_99, 3_79, 3_59, 3_40, 3_20, 3_00, 2_99, 2_79, 2_59, 2_40, 2_20, 2_00, 1_99, 1_66, 1_33, 1_00, 99, 66, 33, 0, ]
75
1
"""Validation of the check letter of a Spanish national ID (DNI/NIF)."""

# Message raised for any structurally invalid ID string.
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
# Official DNI check letters, indexed by (id_number % 23).
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def a_(__snake_case: str) -> bool:
    """Return True iff *__snake_case* is a Spanish DNI with a valid check letter.

    The input may contain a dash separator and a lowercase check letter,
    e.g. "12345678-z".

    Raises:
        TypeError: if the input is not a string.
        ValueError: if the string is not 8 digits followed by one letter.
    """
    if not isinstance(__snake_case, str):
        # Original computed this message but raised the raw input instead;
        # raise the informative message.
        raise TypeError(f"Expected string as input, found {type(__snake_case).__name__}")

    spanish_id_clean = __snake_case.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    # A digit in the check position is never a valid check letter.
    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    # Fix: the lookup table existed only under an obfuscated, shadowed name,
    # so this line previously raised NameError; LOOKUP_LETTERS is now defined.
    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
"""Lazy import scaffolding for the Funnel Transformer model.

Only the configuration and slow tokenizer names are declared eagerly; the
fast tokenizer and the PyTorch/TensorFlow modeling files are registered in
``_import_structure`` when their backend is available and are materialised
on first attribute access via ``_LazyModule``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Fix: this dict and its per-backend extensions were all bound to the same
# throwaway name, leaving ``_import_structure`` undefined at the
# ``_LazyModule`` call below (NameError on import).
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

# PyTorch implementation.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

# TensorFlow implementation.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are lazy.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Fix: the lazy proxy was bound to a throwaway module-level name; it must
    # replace this module in sys.modules for lazy attribute access to work.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
75
1
"""PyTorch tests for the MaMaaa (M2M100) seq2seq model: tester fixture,
common model tests, and slow integration tests against facebook/m2m100_418M.

NOTE(review): this file is obfuscation-damaged — every parameter is named
``__snake_case``/``lowerCAmelCase`` (duplicate argument names are a
SyntaxError), every local is assigned to ``lowerCamelCase_`` while later
statements read the original names, and call sites reference
``prepare_mam_aaa_inputs_dict`` although the def below is named ``a_``.
Code is reproduced verbatim; only documentation was added.
"""
import copy
import tempfile
import unittest

from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
    from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder


# NOTE(review): duplicate parameter names below are obfuscation damage; the
# body expects (config, input_ids, decoder_input_ids, attention_mask=None,
# decoder_attention_mask=None, head_mask=None, decoder_head_mask=None,
# cross_attn_head_mask=None) — TODO confirm against the original file.
def a_ ( __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[int]=None , __snake_case : List[str]=None , __snake_case : Optional[Any]=None , __snake_case : Optional[int]=None , __snake_case : List[str]=None , ) -> dict:
    """Fill in any mask argument left as ``None`` and pack everything into the
    kwargs dict expected by ``MaMaaaModel.forward``."""
    if attention_mask is None:
        lowerCamelCase_ = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        lowerCamelCase_ = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        lowerCamelCase_ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__snake_case )
    if decoder_head_mask is None:
        lowerCamelCase_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__snake_case )
    if cross_attn_head_mask is None:
        lowerCamelCase_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__snake_case )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): encoder mask reused for the decoder slot — looks like
        # an upstream quirk/bug in the original test helper; verify.
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


class __UpperCamelCase :
    """Builds small random MaMaaa configs/inputs and runs shape/consistency
    checks on behalf of the unittest class below."""

    # NOTE(review): duplicated `lowerCAmelCase` parameter names (obfuscation);
    # original signature carried batch_size, seq_length, is_training, ... etc.
    def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=99, lowerCAmelCase=16, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=4, lowerCAmelCase="relu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=20, lowerCAmelCase=2, lowerCAmelCase=1, lowerCAmelCase=0, ):
        """Store all hyper-parameters of the tiny test model."""
        lowerCamelCase_ = parent
        lowerCamelCase_ = batch_size
        lowerCamelCase_ = seq_length
        lowerCamelCase_ = is_training
        lowerCamelCase_ = use_labels
        lowerCamelCase_ = vocab_size
        lowerCamelCase_ = hidden_size
        lowerCamelCase_ = num_hidden_layers
        lowerCamelCase_ = num_attention_heads
        lowerCamelCase_ = intermediate_size
        lowerCamelCase_ = hidden_act
        lowerCamelCase_ = hidden_dropout_prob
        lowerCamelCase_ = attention_probs_dropout_prob
        lowerCamelCase_ = encoder_layerdrop
        lowerCamelCase_ = decoder_layerdrop
        lowerCamelCase_ = max_position_embeddings
        lowerCamelCase_ = eos_token_id
        lowerCamelCase_ = pad_token_id
        lowerCamelCase_ = bos_token_id

    def lowercase__ ( self ):
        """Create a config plus random encoder/decoder input ids (clamped away
        from the pad id) and the matching forward kwargs."""
        lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase_ = self.eos_token_id  # Eos Token
        lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        lowerCamelCase_ = input_ids.clamp(self.pad_token_id + 1 )
        lowerCamelCase_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
        lowerCamelCase_ = self.get_config()
        lowerCamelCase_ = prepare_mam_aaa_inputs_dict(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
        return config, inputs_dict

    def lowercase__ ( self ):
        """Build a MaMaaaConfig from the stored hyper-parameters."""
        return MaMaaaConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, encoder_layerdrop=self.encoder_layerdrop, decoder_layerdrop=self.decoder_layerdrop, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, )

    def lowercase__ ( self ):
        """Alias of prepare_config_and_inputs used by the common test mixin."""
        lowerCamelCase_, lowerCamelCase_ = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """Check decoder `past_key_values` caching: the cached forward pass must
        match a full-sequence forward pass on a random output slice."""
        lowerCamelCase_ = MaMaaaModel(config=lowerCAmelCase ).get_decoder().to(lowerCAmelCase ).eval()
        lowerCamelCase_ = inputs_dict['''input_ids''']
        lowerCamelCase_ = inputs_dict['''attention_mask''']
        lowerCamelCase_ = inputs_dict['''head_mask''']
        # first forward pass
        lowerCamelCase_ = model(lowerCAmelCase, attention_mask=lowerCAmelCase, head_mask=lowerCAmelCase, use_cache=lowerCAmelCase )
        lowerCamelCase_, lowerCamelCase_ = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        lowerCamelCase_ = ids_tensor((self.batch_size, 3), config.vocab_size )
        lowerCamelCase_ = ids_tensor((self.batch_size, 3), 2 )
        # append to next input_ids and
        lowerCamelCase_ = torch.cat([input_ids, next_tokens], dim=-1 )
        lowerCamelCase_ = torch.cat([attention_mask, next_attn_mask], dim=-1 )
        lowerCamelCase_ = model(lowerCAmelCase, attention_mask=lowerCAmelCase )['''last_hidden_state''']
        lowerCamelCase_ = model(lowerCAmelCase, attention_mask=lowerCAmelCase, past_key_values=lowerCAmelCase )[
            '''last_hidden_state'''
        ]
        # select random slice
        lowerCamelCase_ = ids_tensor((1,), output_from_past.shape[-1] ).item()
        lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-2 ) )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """Save/reload encoder and decoder standalone and verify their outputs
        match the full model's hidden states within 1e-3."""
        lowerCamelCase_ = MaMaaaModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval()
        lowerCamelCase_ = model(**lowerCAmelCase )
        lowerCamelCase_ = outputs.encoder_last_hidden_state
        lowerCamelCase_ = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase_ = model.get_encoder()
            encoder.save_pretrained(lowerCAmelCase )
            lowerCamelCase_ = MaMaaaEncoder.from_pretrained(lowerCAmelCase ).to(lowerCAmelCase )
        lowerCamelCase_ = encoder(inputs_dict['''input_ids'''], attention_mask=inputs_dict['''attention_mask'''] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase_ = model.get_decoder()
            decoder.save_pretrained(lowerCAmelCase )
            lowerCamelCase_ = MaMaaaDecoder.from_pretrained(lowerCAmelCase ).to(lowerCAmelCase )
        lowerCamelCase_ = decoder(
            input_ids=inputs_dict['''decoder_input_ids'''], attention_mask=inputs_dict['''decoder_attention_mask'''], encoder_hidden_states=lowerCAmelCase, encoder_attention_mask=inputs_dict['''attention_mask'''], )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )


@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common-suite tests for MaMaaa.

    NOTE(review): base classes were obfuscated to ``lowerCamelCase__`` —
    presumably ModelTesterMixin, GenerationTesterMixin and
    PipelineTesterMixin; verify against the original.
    """

    # Model classes / pipeline mapping exercised by the shared mixins.
    lowercase : Union[str, Any] = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    lowercase : Dict = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    lowercase : Optional[int] = (
        {
            'conversational': MaMaaaForConditionalGeneration,
            'feature-extraction': MaMaaaModel,
            'summarization': MaMaaaForConditionalGeneration,
            'text2text-generation': MaMaaaForConditionalGeneration,
            'translation': MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    lowercase : Dict = True
    lowercase : Tuple = True
    lowercase : Optional[Any] = False
    lowercase : int = False

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Skip pipeline cases that cannot be built for M2M100."""
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    def lowercase__ ( self ):
        """setUp: create the model tester and the config tester."""
        lowerCamelCase_ = MaMaaaModelTester(self )
        lowerCamelCase_ = ConfigTester(self, config_class=lowerCAmelCase )

    def lowercase__ ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowercase__ ( self ):
        """Round-trip save/from_pretrained must report no missing keys."""
        lowerCamelCase_, lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(lowerCAmelCase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowerCAmelCase )
                lowerCamelCase_, lowerCamelCase_ = model_class.from_pretrained(lowerCAmelCase, output_loading_info=lowerCAmelCase )
            self.assertEqual(info['''missing_keys'''], [] )

    def lowercase__ ( self ):
        """Delegate: decoder past_key_values consistency check."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCAmelCase )

    def lowercase__ ( self ):
        """Delegate: standalone encoder/decoder save-load check."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase )

    def lowercase__ ( self ):
        """Forward pass via `inputs_embeds` instead of `input_ids` must run."""
        lowerCamelCase_, lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            lowerCamelCase_ = model_class(lowerCAmelCase )
            model.to(lowerCAmelCase )
            model.eval()
            lowerCamelCase_ = copy.deepcopy(self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) )
            if not self.is_encoder_decoder:
                lowerCamelCase_ = inputs['''input_ids''']
                del inputs["input_ids"]
            else:
                lowerCamelCase_ = inputs['''input_ids''']
                lowerCamelCase_ = inputs.get('''decoder_input_ids''', lowerCAmelCase )
                del inputs["input_ids"]
                inputs.pop('''decoder_input_ids''', lowerCAmelCase )
            lowerCamelCase_ = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                lowerCamelCase_ = wte(lowerCAmelCase )
            else:
                lowerCamelCase_ = wte(lowerCAmelCase )
                lowerCamelCase_ = wte(lowerCAmelCase )
            with torch.no_grad():
                model(**lowerCAmelCase )[0]

    def lowercase__ ( self ):
        """Generation must run in fp16 on CUDA (greedy and beam search)."""
        lowerCamelCase_, lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        lowerCamelCase_ = input_dict['''input_ids''']
        lowerCamelCase_ = input_ids.ne(1 ).to(lowerCAmelCase )
        lowerCamelCase_ = MaMaaaForConditionalGeneration(lowerCAmelCase ).eval().to(lowerCAmelCase )
        if torch_device == "cuda":
            model.half()
        model.generate(lowerCAmelCase, attention_mask=lowerCAmelCase )
        model.generate(num_beams=4, do_sample=lowerCAmelCase, early_stopping=lowerCAmelCase, num_return_sequences=3 )


def a_ ( __snake_case : Tuple ) -> "torch.Tensor":
    """Build a long tensor on the default test device from nested lists."""
    return torch.tensor(__snake_case , dtype=torch.long , device=__snake_case )


# Absolute tolerance used by the integration tests below.
a_ : int = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __UpperCamelCase ( unittest.TestCase ):
    """Slow integration tests against the released facebook/m2m100_418M checkpoint."""

    @cached_property
    def lowercase__ ( self ):
        """Lazily construct the M2M100 tokenizer once per test run."""
        return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' )

    def lowercase__ ( self ):
        """Hidden states of the base model must match recorded reference values."""
        lowerCamelCase_ = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(lowerCAmelCase )
        lowerCamelCase_ = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
        lowerCamelCase_ = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
        lowerCamelCase_ = prepare_mam_aaa_inputs_dict(model.config, lowerCAmelCase, lowerCAmelCase )
        with torch.no_grad():
            lowerCamelCase_ = model(**lowerCAmelCase )[0]
        lowerCamelCase_ = torch.Size((1, 11, 1_024) )
        self.assertEqual(output.shape, lowerCAmelCase )
        # change to expected output here
        lowerCamelCase_ = torch.tensor(
            [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]], device=lowerCAmelCase )
        self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=lowerCAmelCase ) )

    def lowercase__ ( self ):
        """LM-head logits must match recorded reference values."""
        lowerCamelCase_ = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(lowerCAmelCase )
        # change to intended input
        lowerCamelCase_ = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
        lowerCamelCase_ = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
        lowerCamelCase_ = prepare_mam_aaa_inputs_dict(model.config, lowerCAmelCase, lowerCAmelCase )
        with torch.no_grad():
            lowerCamelCase_ = model(**lowerCAmelCase )[0]
        lowerCamelCase_ = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape, lowerCAmelCase )
        # change to expected output here
        lowerCamelCase_ = torch.tensor(
            [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]], device=lowerCAmelCase )
        self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=lowerCAmelCase ) )

    def lowercase__ ( self ):
        """End-to-end fr->en beam-search translation must reproduce reference text."""
        lowerCamelCase_ = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(lowerCAmelCase )
        lowerCamelCase_ = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''', src_lang='''fr''', tgt_lang='''en''' )
        lowerCamelCase_ = [
            '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
            '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
            '''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
            ''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
            ''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        lowerCamelCase_ = tokenizer(lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''pt''' )
        lowerCamelCase_ = model.generate(
            input_ids=dct['''input_ids'''].to(lowerCAmelCase ), attention_mask=dct['''attention_mask'''].to(lowerCAmelCase ), num_beams=5, forced_bos_token_id=tokenizer.get_lang_id('''en''' ), )
        lowerCamelCase_ = [
            '''The NSA case highlights the total absence of intelligence debate''',
            '''I think there are two levels of response from the French government.''',
            '''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
            ''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
            ''' communications in France.''',
        ]
        lowerCamelCase_ = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=lowerCAmelCase, skip_special_tokens=lowerCAmelCase )
        assert generated == expected_en
75
"""Tests for Wav2Vec2ProcessorWithLM: tokenizer + feature extractor +
pyctcdecode beam-search decoder bundled behind one processor API.

NOTE(review): obfuscation damage throughout — locals all assigned to
``lowerCamelCase_`` while later statements read the intended names, and
arguments are passed as ``lowerCAmelCase``. Code reproduced verbatim;
only documentation added.
"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wavaveca.test_feature_extraction_wavaveca import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
    from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput

if is_torch_available():
    from transformers import WavaVecaForCTC


@require_pyctcdecode
class __UpperCamelCase ( unittest.TestCase ):
    """Unit + slow integration tests for WavaVecaProcessorWithLM."""

    def lowercase__ ( self ):
        """setUp: write a tiny vocab and feature-extractor config into a temp
        dir and record the hub id of the test decoder."""
        lowerCamelCase_ = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        lowerCamelCase_ = dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
        lowerCamelCase_ = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        lowerCamelCase_ = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16_000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        lowerCamelCase_ = tempfile.mkdtemp()
        lowerCamelCase_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCamelCase_ = os.path.join(self.tmpdirname, lowerCAmelCase )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
        with open(self.feature_extraction_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
        # load decoder from hub
        lowerCamelCase_ = '''hf-internal-testing/ngram-beam-search-decoder'''

    def lowercase__ ( self, **lowerCAmelCase ):
        """Build the CTC tokenizer from the temp dir (kwargs override defaults)."""
        lowerCamelCase_ = self.add_kwargs_tokens_map.copy()
        kwargs.update(lowerCAmelCase )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase )

    def lowercase__ ( self, **lowerCAmelCase ):
        """Build the feature extractor from the temp dir."""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowerCAmelCase )

    def lowercase__ ( self, **lowerCAmelCase ):
        """Load the n-gram beam-search decoder from the hub."""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowerCAmelCase )

    def lowercase__ ( self ):
        """tearDown: remove the temp dir."""
        shutil.rmtree(self.tmpdirname )

    def lowercase__ ( self ):
        """save_pretrained / from_pretrained must round-trip all three parts."""
        lowerCamelCase_ = self.get_tokenizer()
        lowerCamelCase_ = self.get_feature_extractor()
        lowerCamelCase_ = self.get_decoder()
        lowerCamelCase_ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer, lowerCAmelCase )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor, lowerCAmelCase )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
        self.assertIsInstance(processor.decoder, lowerCAmelCase )

    def lowercase__ ( self ):
        """from_pretrained kwargs must override the LM decoding parameters."""
        lowerCamelCase_ = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0 )
        self.assertEqual(processor.language_model.beta, 3.0 )
        self.assertEqual(processor.language_model.score_boundary, -7.0 )
        self.assertEqual(processor.language_model.unk_score_offset, 3 )

    def lowercase__ ( self ):
        """A tokenizer whose vocab is not covered by the decoder must be rejected."""
        lowerCamelCase_ = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['''xx'''] )
        with self.assertRaisesRegex(lowerCAmelCase, '''include''' ):
            WavaVecaProcessorWithLM(
                tokenizer=lowerCAmelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )

    def lowercase__ ( self ):
        """Processor(audio) must equal the bare feature extractor's output."""
        lowerCamelCase_ = self.get_feature_extractor()
        lowerCamelCase_ = self.get_tokenizer()
        lowerCamelCase_ = self.get_decoder()
        lowerCamelCase_ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ = floats_list((3, 1_000) )
        lowerCamelCase_ = feature_extractor(lowerCAmelCase, return_tensors='''np''' )
        lowerCamelCase_ = processor(lowerCAmelCase, return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )

    def lowercase__ ( self ):
        """Processor(text) must equal the bare tokenizer's output."""
        lowerCamelCase_ = self.get_feature_extractor()
        lowerCamelCase_ = self.get_tokenizer()
        lowerCamelCase_ = self.get_decoder()
        lowerCamelCase_ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ = '''This is a test string'''
        lowerCamelCase_ = processor(text=lowerCAmelCase )
        lowerCamelCase_ = tokenizer(lowerCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key] )

    def lowercase__ ( self, lowerCAmelCase=(2, 10, 16), lowerCAmelCase=77 ):
        """Deterministic random logits fixture (seeded)."""
        np.random.seed(lowerCAmelCase )
        return np.random.rand(*lowerCAmelCase )

    def lowercase__ ( self ):
        """processor.decode must agree with pyctcdecode's decode_beams."""
        lowerCamelCase_ = self.get_feature_extractor()
        lowerCamelCase_ = self.get_tokenizer()
        lowerCamelCase_ = self.get_decoder()
        lowerCamelCase_ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ = self._get_dummy_logits(shape=(10, 16), seed=13 )
        lowerCamelCase_ = processor.decode(lowerCAmelCase )
        lowerCamelCase_ = decoder.decode_beams(lowerCAmelCase )[0]
        self.assertEqual(decoded_decoder[0], decoded_processor.text )
        self.assertEqual('''</s> <s> </s>''', decoded_processor.text )
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )

    @parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def lowercase__ ( self, lowerCAmelCase ):
        """processor.batch_decode must agree with decode_beams_batch for every
        multiprocessing start method (and with no pool at all)."""
        lowerCamelCase_ = self.get_feature_extractor()
        lowerCamelCase_ = self.get_tokenizer()
        lowerCamelCase_ = self.get_decoder()
        lowerCamelCase_ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            lowerCamelCase_ = processor.batch_decode(lowerCAmelCase )
        else:
            with get_context(lowerCAmelCase ).Pool() as pool:
                lowerCamelCase_ = processor.batch_decode(lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_ = list(lowerCAmelCase )
        with get_context('''fork''' ).Pool() as p:
            lowerCamelCase_ = decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(lowerCAmelCase, decoded_processor.text )
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text )
        self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score )
        self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score )

    def lowercase__ ( self ):
        """Beam-search pruning kwargs must be forwarded to pyctcdecode."""
        lowerCamelCase_ = self.get_feature_extractor()
        lowerCamelCase_ = self.get_tokenizer()
        lowerCamelCase_ = self.get_decoder()
        lowerCamelCase_ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ = self._get_dummy_logits()
        lowerCamelCase_ = 15
        lowerCamelCase_ = -2_0.0
        lowerCamelCase_ = -4.0
        lowerCamelCase_ = processor.batch_decode(
            lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
        lowerCamelCase_ = decoded_processor_out.text
        lowerCamelCase_ = list(lowerCAmelCase )
        with get_context('''fork''' ).Pool() as pool:
            lowerCamelCase_ = decoder.decode_beams_batch(
                lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, )
        lowerCamelCase_ = [d[0][0] for d in decoded_decoder_out]
        lowerCamelCase_ = [d[0][2] for d in decoded_decoder_out]
        lowerCamelCase_ = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
        self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], lowerCAmelCase )
        self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) )
        self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) )

    def lowercase__ ( self ):
        """LM weighting kwargs (alpha/beta/offsets) must be forwarded and stored."""
        lowerCamelCase_ = self.get_feature_extractor()
        lowerCamelCase_ = self.get_tokenizer()
        lowerCamelCase_ = self.get_decoder()
        lowerCamelCase_ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        lowerCamelCase_ = self._get_dummy_logits()
        lowerCamelCase_ = 2.0
        lowerCamelCase_ = 5.0
        lowerCamelCase_ = -2_0.0
        lowerCamelCase_ = True
        lowerCamelCase_ = processor.batch_decode(
            lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
        lowerCamelCase_ = decoded_processor_out.text
        lowerCamelCase_ = list(lowerCAmelCase )
        decoder.reset_params(
            alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, )
        with get_context('''fork''' ).Pool() as pool:
            lowerCamelCase_ = decoder.decode_beams_batch(
                lowerCAmelCase, lowerCAmelCase, )
        lowerCamelCase_ = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
        self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase )
        lowerCamelCase_ = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0 )
        self.assertEqual(lm_model.beta, 5.0 )
        self.assertEqual(lm_model.unk_score_offset, -2_0.0 )
        self.assertEqual(lm_model.score_boundary, lowerCAmelCase )

    def lowercase__ ( self ):
        """Only decoder-relevant files must be downloaded from the hub repo."""
        lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ = processor.decoder.model_container[processor.decoder._model_key]
        lowerCamelCase_ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        lowerCamelCase_ = os.listdir(lowerCAmelCase )
        lowerCamelCase_ = ['''alphabet.json''', '''language_model''']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self ):
        """Loading from a local snapshot must use the same decoder files as the hub."""
        lowerCamelCase_ = snapshot_download('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase )
        lowerCamelCase_ = processor.decoder.model_container[processor.decoder._model_key]
        lowerCamelCase_ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        lowerCamelCase_ = os.listdir(lowerCAmelCase )
        lowerCamelCase_ = os.listdir(lowerCAmelCase )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self ):
        """AutoProcessor must behave identically to the explicit class."""
        lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ = floats_list((3, 1_000) )
        lowerCamelCase_ = processor_wavaveca(lowerCAmelCase, return_tensors='''np''' )
        lowerCamelCase_ = processor_auto(lowerCAmelCase, return_tensors='''np''' )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 )
        lowerCamelCase_ = self._get_dummy_logits()
        lowerCamelCase_ = processor_wavaveca.batch_decode(lowerCAmelCase )
        lowerCamelCase_ = processor_auto.batch_decode(lowerCAmelCase )
        self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )

    def lowercase__ ( self ):
        """model_input_names must come from the feature extractor."""
        lowerCamelCase_ = self.get_feature_extractor()
        lowerCamelCase_ = self.get_tokenizer()
        lowerCamelCase_ = self.get_decoder()
        lowerCamelCase_ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase )
        self.assertListEqual(
            processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', )

    @staticmethod
    def lowercase__ ( lowerCAmelCase, lowerCAmelCase ):
        """Project a list of offset dicts onto a single key."""
        lowerCamelCase_ = [d[key] for d in offsets]
        return retrieved_list

    def lowercase__ ( self ):
        """decode(output_word_offsets=True) must return consistent word offsets."""
        lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ = self._get_dummy_logits()[0]
        lowerCamelCase_ = processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ), 4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] )

    def lowercase__ ( self ):
        """batch_decode(output_word_offsets=True) must return offsets per item."""
        lowerCamelCase_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        lowerCamelCase_ = self._get_dummy_logits()
        lowerCamelCase_ = processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ), 4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) )
        self.assertListEqual(
            [''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] )

    @slow
    @require_torch
    @require_torchaudio
    def lowercase__ ( self ):
        """End-to-end: real audio through the CTC model and LM decoder; word
        offsets converted to times must match recorded reference values."""
        import torch

        lowerCamelCase_ = load_dataset('''common_voice''', '''en''', split='''train''', streaming=lowerCAmelCase )
        lowerCamelCase_ = ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) )
        lowerCamelCase_ = iter(lowerCAmelCase )
        lowerCamelCase_ = next(lowerCAmelCase )
        lowerCamelCase_ = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        lowerCamelCase_ = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        lowerCamelCase_ = processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values
        with torch.no_grad():
            lowerCamelCase_ = model(lowerCAmelCase ).logits.cpu().numpy()
        lowerCamelCase_ = processor.decode(logits[0], output_word_offsets=lowerCAmelCase )
        lowerCamelCase_ = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        lowerCamelCase_ = [
            {
                '''start_time''': d['''start_offset'''] * time_offset,
                '''end_time''': d['''end_offset'''] * time_offset,
                '''word''': d['''word'''],
            }
            for d in output['''word_offsets''']
        ]
        lowerCamelCase_ = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
        # output words
        self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase )
        self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text )
        # output times
        lowerCamelCase_ = torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) )
        lowerCamelCase_ = torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) )
        # fmt: off
        lowerCamelCase_ = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
        lowerCamelCase_ = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
        # fmt: on
        self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
        self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
75
1
"""Singly linked list with index access, insertion/deletion at any position, and in-place reversal."""
from typing import Any


class Node:
    """A single linked-list node holding ``data`` and a pointer to the next node."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next: "Node | None" = None  # successor node; None marks the list tail

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list. Iteration yields node *data* (not Node objects)."""

    def __init__(self) -> None:
        self.head: "Node | None" = None  # first node; None means the list is empty

    def __iter__(self) -> Any:
        """Yield the data of each node from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes (O(n) — walks the whole list)."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        """Arrow-joined string of all node data, e.g. ``1->2->3``."""
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data stored at position ``index``.

        Raises:
            ValueError: if ``index`` is out of range.
        """
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data stored at position ``index``.

        Raises:
            ValueError: if ``index`` is out of range.
        """
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` so that it ends up at position ``index``.

        Raises:
            IndexError: if ``index`` is not in ``0..len(self)`` (inclusive).
        """
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        """Remove and return the data of the first node."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        """Remove and return the data of the last node."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove the node at ``index`` and return its data.

        Raises:
            IndexError: if the list is empty or ``index`` is out of range.
        """
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by re-pointing each node's ``next`` link."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Put the head at what used to be the end
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise every LinkedList operation with integer payloads."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise LinkedList with heterogeneous payloads (ints, strings, floats, Nodes, None)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list)
        == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    """Interactive demo: build, mutate and inspect a list from stdin."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
75
"""Tests for the Stable Diffusion InstructPix2Pix pipeline (fast dummy-model tests + slow GPU tests)."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny randomly-initialized pipeline components for fast CPU tests."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,  # pix2pix concatenates image latents to noise latents, hence 8 not 4
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/generator inputs for the dummy pipeline."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        # batch the init image: PIL -> float tensor in [-1, 1], NCHW, repeated twice
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        # Passing pre-encoded latents as the image input must give the same result as passing the image.
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        """Deterministic inputs using the reference pix2pix example image."""
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
75
1
"""Convert a PyTorch BERT checkpoint into the original TensorFlow (v1) checkpoint format."""
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export ``model``'s weights as a TF1 checkpoint under ``ckpt_dir``.

    Args:
        model: a loaded ``BertModel`` whose ``state_dict`` is exported.
        ckpt_dir: directory to save the ``.ckpt`` files into (created if missing).
        model_name: base name for the checkpoint file; ``-`` is replaced by ``_``.
    """
    # PyTorch Linear stores weights as (out, in); TF kernels are (in, out), so these get transposed.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # Ordered name rewrites applied to each state-dict key to produce the TF variable name.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str) -> str:
        # Apply every rewrite in order, then prefix with the "bert/" scope.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        # Create a zero-initialized TF variable matching the tensor's dtype/shape.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            # Sanity check: the value read back from TF must match the PyTorch tensor.
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: parse arguments, load the PyTorch model, run the conversion."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
75
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __UpperCamelCase : lowercase : Union[str, Any] =XGLMConfig lowercase : Optional[Any] ={} lowercase : Optional[int] ='gelu' def __init__( self, lowerCAmelCase, lowerCAmelCase=14, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=0.0_2, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_mask lowerCamelCase_ =use_labels lowerCamelCase_ =vocab_size lowerCamelCase_ =d_model lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =ffn_dim lowerCamelCase_ =activation_function lowerCamelCase_ =activation_dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =initializer_range lowerCamelCase_ =None lowerCamelCase_ =0 lowerCamelCase_ =2 lowerCamelCase_ =1 def lowercase__ ( self ): """simple docstring""" return XGLMConfig.from_pretrained('''facebook/xglm-564M''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 ) lowerCamelCase_ =None if 
self.use_input_mask: lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =self.get_config() lowerCamelCase_ =floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, input_mask, head_mask, ) def lowercase__ ( self ): """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=lowerCAmelCase, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=lowerCAmelCase, ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={ '''input_ids''': input_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : int =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowercase : Optional[Any] =(TFXGLMForCausalLM,) if is_tf_available() else () lowercase : Tuple =( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) lowercase : Optional[Any] =False lowercase : Optional[Any] =False lowercase : Optional[int] =False def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFXGLMModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, n_embd=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() @slow def lowercase__ ( self ): """simple docstring""" for model_name in 
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    # Slow integration tests for TF XGLM generation against the
    # facebook/xglm-564M checkpoint: greedy decoding, seeded sampling, and
    # left-padded batched generation.
    # NOTE(review): local variable names in this file were mechanically
    # rewritten (every assignment targets lowerCamelCase_); reads such as
    # model, output_ids, tokenized, inputs, sentences below refer to the
    # pre-rewrite names — TODO restore before running.

    @slow
    def lowercase__ ( self, lowerCAmelCase=True ):
        """simple docstring"""
        # Greedy generation (num_beams=1, no sampling) for the prompt
        # "The dog" must reproduce the hard-coded reference token ids.
        lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ =tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.intaa )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        lowerCamelCase_ =[2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """simple docstring"""
        # Seeded sampling must be reproducible: with a fixed TF seed and a
        # stateless generation seed, the decoded text must match exactly.
        lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tf.random.set_seed(0 )
        lowerCamelCase_ =tokenizer('''Today is a nice day and''', return_tensors='''tf''' )
        lowerCamelCase_ =tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(''':/CPU:0''' ):
            lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, seed=[7, 0] )
        lowerCamelCase_ =tokenizer.decode(output_ids[0], skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =(
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(lowerCAmelCase, lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """simple docstring"""
        # Batched generation with left padding must produce the same text as
        # generating each sentence individually.
        lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ ='''left'''
        # use different length sentences to test batching
        lowerCamelCase_ =[
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When''',
            '''Hello, my dog is a little''',
        ]
        lowerCamelCase_ =tokenizer(lowerCAmelCase, return_tensors='''tf''', padding=lowerCAmelCase )
        lowerCamelCase_ =inputs['''input_ids''']
        # Batched (padded) generation.
        lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, attention_mask=inputs['''attention_mask'''], max_new_tokens=12 )
        # Per-sentence (unpadded) generation for comparison.
        lowerCamelCase_ =tokenizer(sentences[0], return_tensors='''tf''' ).input_ids
        lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 )
        lowerCamelCase_ =tokenizer(sentences[1], return_tensors='''tf''' ).input_ids
        lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 )
        lowerCamelCase_ =tokenizer.batch_decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =tokenizer.decode(output_non_padded[0], skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =tokenizer.decode(output_padded[0], skip_special_tokens=lowerCAmelCase )
        lowerCamelCase_ =[
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
            '''a single''',
            '''Hello, my dog is a little bit of a shy one, but he is very friendly''',
        ]
        self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase, [non_padded_sentence, padded_sentence] )
75
1
def a_ ( input , drop_prob : float = 0.0 , training : bool = False ) -> str:
    """Drop paths (stochastic depth) per sample, applied in the main path of
    residual blocks.

    Defect fixed: the obfuscated original declared all three parameters as
    ``__snake_case`` (a duplicate-parameter SyntaxError) and assigned every
    local to ``lowerCamelCase_``, clobbering ``keep_prob`` before it was used.
    Parameter order, defaults and the function name are unchanged.

    Args:
        input: tensor of any rank; dropping is decided per sample (dim 0).
        drop_prob: probability of zeroing a whole sample's path.
        training: when False (or drop_prob == 0), the input is returned as-is.

    Returns:
        Tensor of the same shape: each sample is either zeroed or rescaled by
        1 / keep_prob so the expectation matches the input.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample; trailing singleton dims broadcast over
    # the remaining axes (works for diff dim tensors, not just 2D ConvNets).
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_()  # binarize: 1 with prob keep_prob, else 0
    output = input.div(keep_prob ) * random_tensor
    return output
class __UpperCamelCase ( nn.Module ):
    # PoolFormer token mixer: average pooling minus the identity, i.e. the
    # forward pass returns pool(x) - x (the residual is added by the caller).
    # NOTE(review): identifiers were mechanically mangled — ``nn.AvgPoolad``
    # presumably was ``nn.AvgPool2d`` and ``pool_size`` is read but never
    # bound under that name; TODO confirm against the upstream source.

    def __init__( self, lowerCAmelCase ):
        """simple docstring"""
        super().__init__()
        # stride=1 with padding pool_size // 2 keeps the spatial size
        # unchanged so the subtraction below is well-formed.
        lowerCamelCase_ =nn.AvgPoolad(lowerCAmelCase, stride=1, padding=pool_size // 2, count_include_pad=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # Subtracting the input removes the identity component of average
        # pooling, leaving only the "mixing" term.
        return self.pool(lowerCAmelCase ) - hidden_states
class __UpperCamelCase ( nn.Module ):
    # One PoolFormer block: pooling token-mixer + MLP (PoolFormerOutput),
    # each preceded by GroupNorm and wrapped in a residual connection with
    # optional stochastic depth and optional per-channel layer scale.
    # NOTE(review): locals are mangled to lowerCamelCase_; reads such as
    # drop_path, config, pooling_output, hidden_states, layer_output refer
    # to the pre-rewrite names — TODO restore before running.

    def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        super().__init__()
        lowerCamelCase_ =PoolFormerPooling(lowerCAmelCase )
        lowerCamelCase_ =PoolFormerOutput(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
        # Two separate norms: one before the pooling mixer, one before the MLP.
        lowerCamelCase_ =PoolFormerGroupNorm(lowerCAmelCase )
        lowerCamelCase_ =PoolFormerGroupNorm(lowerCAmelCase )
        # Useful for training neural nets
        lowerCamelCase_ =PoolFormerDropPath(lowerCAmelCase ) if drop_path > 0.0 else nn.Identity()
        lowerCamelCase_ =config.use_layer_scale
        if config.use_layer_scale:
            # Learnable per-channel scales initialised to layer_scale_init_value,
            # one for each of the two residual branches.
            lowerCamelCase_ =nn.Parameter(
                config.layer_scale_init_value * torch.ones((lowerCAmelCase) ), requires_grad=lowerCAmelCase )
            lowerCamelCase_ =nn.Parameter(
                config.layer_scale_init_value * torch.ones((lowerCAmelCase) ), requires_grad=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        if self.use_layer_scale:
            # Branch 1: norm -> pooling mixer -> layer scale -> drop path -> add.
            lowerCamelCase_ =self.pooling(self.before_norm(lowerCAmelCase ) )
            # unsqueeze twice to broadcast the (C,) scale over (B, C, H, W).
            lowerCamelCase_ =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            lowerCamelCase_ =hidden_states + self.drop_path(lowerCAmelCase )
            lowerCamelCase_ =()
            # Branch 2: norm -> MLP -> layer scale -> drop path -> add.
            lowerCamelCase_ =self.output(self.after_norm(lowerCAmelCase ) )
            lowerCamelCase_ =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            lowerCamelCase_ =hidden_states + self.drop_path(lowerCAmelCase )
            lowerCamelCase_ =(output,) + outputs
            return outputs
        else:
            lowerCamelCase_ =self.drop_path(self.pooling(self.before_norm(lowerCAmelCase ) ) )
            # First residual connection
            lowerCamelCase_ =pooling_output + hidden_states
            lowerCamelCase_ =()
            # Second residual connection inside the PoolFormerOutput block
            lowerCamelCase_ =self.drop_path(self.output(self.after_norm(lowerCAmelCase ) ) )
            lowerCamelCase_ =hidden_states + layer_output
            lowerCamelCase_ =(output,) + outputs
            return outputs
class __UpperCamelCase ( lowerCamelCase__ ):
    # Abstract base for PoolFormer models: hooks weight init and gradient
    # checkpointing into the shared PreTrainedModel machinery.
    # config_class used by from_pretrained
    lowercase : Tuple =PoolFormerConfig
    # prefix for weight names in the checkpoint
    lowercase : Union[str, Any] ='poolformer'
    # name of the main input expected by forward
    lowercase : Dict ='pixel_values'
    # gradient checkpointing is supported
    lowercase : Tuple =True

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # Weight init: truncated-normal-style normal_ for Linear/Conv2d
        # (``nn.Convad`` — presumably mangled ``nn.Conv2d``), standard
        # zero-bias / unit-weight for LayerNorm.
        if isinstance(lowerCAmelCase, (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(lowerCAmelCase, nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False ):
        """simple docstring"""
        # Toggle gradient checkpointing on encoder modules.
        if isinstance(lowerCAmelCase, lowerCAmelCase ):
            lowerCamelCase_ =value
@add_start_docstrings(
    'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.'
    , lowerCamelCase__ , )
class __UpperCamelCase ( lowerCamelCase__ ):
    # Headless PoolFormer: just the encoder, returning the last hidden state
    # (and optionally all hidden states). Note PoolFormer has no attention,
    # hence the *WithNoAttention output type.

    def __init__( self, lowerCAmelCase ):
        """simple docstring"""
        super().__init__(lowerCAmelCase )
        lowerCamelCase_ =config
        lowerCamelCase_ =PoolFormerEncoder(lowerCAmelCase )
        # Initialize weights and apply final processing
        self.post_init()

    def lowercase__ ( self ):
        """simple docstring"""
        # Accessor for the patch-embedding modules of the encoder.
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=lowerCAmelCase, config_class=_CONFIG_FOR_DOC, modality='''vision''', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def lowercase__ ( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, ):
        """simple docstring"""
        # Fall back to config defaults when flags are not given explicitly.
        lowerCamelCase_ =(
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowerCamelCase_ =return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''' )
        lowerCamelCase_ =self.encoder(
            lowerCAmelCase, output_hidden_states=lowerCAmelCase, return_dict=lowerCAmelCase, )
        lowerCamelCase_ =encoder_outputs[0]
        if not return_dict:
            # Tuple output: (last_hidden_state, None pooled placeholder, *rest).
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=lowerCAmelCase, hidden_states=encoder_outputs.hidden_states, )
    @add_start_docstrings_to_model_forward(lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=lowerCAmelCase, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def lowercase__ ( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, ):
        """simple docstring"""
        # Image-classification forward pass: backbone -> final norm ->
        # global average pool over spatial dims -> linear classifier,
        # with the standard HF problem_type-driven loss selection.
        # NOTE(review): locals are mangled to lowerCamelCase_; reads such as
        # return_dict, outputs, logits, labels refer to pre-rewrite names.
        lowerCamelCase_ =return_dict if return_dict is not None else self.config.use_return_dict
        lowerCamelCase_ =self.poolformer(
            lowerCAmelCase, output_hidden_states=lowerCAmelCase, return_dict=lowerCAmelCase, )
        lowerCamelCase_ =outputs[0]
        # mean([-2, -1]) is global average pooling over (H, W).
        lowerCamelCase_ =self.classifier(self.norm(lowerCAmelCase ).mean([-2, -1] ) )
        lowerCamelCase_ =None
        if labels is not None:
            # Infer the problem type once from num_labels and the label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    lowerCamelCase_ ='''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    lowerCamelCase_ ='''single_label_classification'''
                else:
                    lowerCamelCase_ ='''multi_label_classification'''
            if self.config.problem_type == "regression":
                lowerCamelCase_ =MSELoss()
                if self.num_labels == 1:
                    lowerCamelCase_ =loss_fct(logits.squeeze(), labels.squeeze() )
                else:
                    lowerCamelCase_ =loss_fct(lowerCAmelCase, lowerCAmelCase )
            elif self.config.problem_type == "single_label_classification":
                lowerCamelCase_ =CrossEntropyLoss()
                lowerCamelCase_ =loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                lowerCamelCase_ =BCEWithLogitsLoss()
                lowerCamelCase_ =loss_fct(lowerCAmelCase, lowerCAmelCase )
        if not return_dict:
            # Tuple output: (loss?, logits, *hidden_states).
            lowerCamelCase_ =(logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase, logits=lowerCAmelCase, hidden_states=outputs.hidden_states )
75
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __UpperCamelCase : @staticmethod def lowercase__ ( *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class __UpperCamelCase ( unittest.TestCase ): lowercase : int =MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =pipeline( '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCamelCase_ =[ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] return object_detector, examples def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =object_detector(examples[0], threshold=0.0 ) lowerCamelCase_ =len(lowerCAmelCase ) self.assertGreater(lowerCAmelCase, 0 ) self.assertEqual( lowerCAmelCase, [ { '''score''': ANY(lowerCAmelCase ), '''label''': ANY(lowerCAmelCase ), '''box''': {'''xmin''': ANY(lowerCAmelCase ), '''ymin''': ANY(lowerCAmelCase ), '''xmax''': ANY(lowerCAmelCase ), '''ymax''': ANY(lowerCAmelCase )}, } for i in range(lowerCAmelCase ) ], ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def lowercase__ ( self ): """simple docstring""" pass @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline( '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCamelCase_ =object_detector( 
'''./tests/fixtures/tests_samples/COCO/000000039769.png''', candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=0.6_4, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ], ) lowerCamelCase_ =object_detector( [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ], threshold=0.6_4, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': 
{'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ] ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], ) lowerCamelCase_ =object_detector( [ { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', 
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, ], ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], ], ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def lowercase__ ( self ): """simple docstring""" pass @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =0.2 lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', 
    @require_torch
    @slow
    def lowercase__ ( self ):
        """simple docstring"""
        # top_k must truncate the (score-sorted) detections to the k best.
        lowerCamelCase_ =2
        lowerCamelCase_ =pipeline('''zero-shot-object-detection''' )
        lowerCamelCase_ =object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            candidate_labels=['''cat''', '''remote''', '''couch'''],
            top_k=lowerCAmelCase, )
        # Expect exactly the two highest-scoring detections from the full run.
        self.assertEqual(
            nested_simplify(lowerCAmelCase, decimals=4 ),
            [
                {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
                {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
            ], )
75
1
def a_ ( __snake_case : Optional[Any] ) -> str:
    """simple docstring"""
    # Converts a fine-pruned ("masked") checkpoint into a standard dense
    # checkpoint by materialising the binary masks into the weight tensors
    # and saving the result as pytorch_model.bin.
    # NOTE(review): locals are mangled to lowerCamelCase_; reads such as
    # args, model, pruning_method, threshold, mask, prefix_, s, s_bar refer
    # to the pre-rewrite names — TODO restore before running.
    lowerCamelCase_ =args.pruning_method
    lowerCamelCase_ =args.threshold
    lowerCamelCase_ =args.model_name_or_path.rstrip('''/''' )
    lowerCamelCase_ =args.target_model_path
    print(F'''Load fine-pruned model from {model_name_or_path}''' )
    # Raw state dict of the fine-pruned model.
    lowerCamelCase_ =torch.load(os.path.join(__snake_case , '''pytorch_model.bin''' ) )
    lowerCamelCase_ ={}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Embeddings / LayerNorm / pooler are never pruned: copy verbatim.
            lowerCamelCase_ =tensor
            print(F'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            # Task heads are kept dense as well.
            lowerCamelCase_ =tensor
            print(F'''Copied layer {name}''' )
        elif "bias" in name:
            # Biases are not masked.
            lowerCamelCase_ =tensor
            print(F'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                # Mask derived from the weight magnitudes themselves.
                lowerCamelCase_ =MagnitudeBinarizer.apply(inputs=__snake_case , threshold=__snake_case )
                lowerCamelCase_ =tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    # Score tensors are consumed when their weight is processed.
                    continue
                # name[:-6] strips the trailing "weight" to build the
                # matching "<prefix>mask_scores" key.
                lowerCamelCase_ =name[:-6]
                lowerCamelCase_ =model[F'''{prefix_}mask_scores''']
                lowerCamelCase_ =TopKBinarizer.apply(__snake_case , __snake_case )
                lowerCamelCase_ =tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                lowerCamelCase_ =name[:-6]
                lowerCamelCase_ =model[F'''{prefix_}mask_scores''']
                lowerCamelCase_ =ThresholdBinarizer.apply(__snake_case , __snake_case , __snake_case )
                lowerCamelCase_ =tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                lowerCamelCase_ =name[:-6]
                lowerCamelCase_ =model[F'''{prefix_}mask_scores''']
                # Hard-concrete stretch interval (l, r): sigmoid scores are
                # stretched to [-0.1, 1.1] then clamped to [0, 1].
                lowerCamelCase_, lowerCamelCase_ =-0.1, 1.1
                lowerCamelCase_ =torch.sigmoid(__snake_case )
                lowerCamelCase_ =s * (r - l) + l
                lowerCamelCase_ =s_bar.clamp(min=0.0 , max=1.0 )
                lowerCamelCase_ =tensor * mask
                print(F'''Pruned layer {name}''' )
            else:
                raise ValueError('''Unknown pruning method''' )
    if target_model_path is None:
        # Default output folder: "bertarized_<model_name>" next to the input.
        lowerCamelCase_ =os.path.join(
            os.path.dirname(__snake_case ) , F'''bertarized_{os.path.basename(__snake_case )}''' )
    if not os.path.isdir(__snake_case ):
        # Copy the whole model folder so config/tokenizer files come along.
        shutil.copytree(__snake_case , __snake_case )
        print(F'''\nCreated folder {target_model_path}''' )
    torch.save(__snake_case , os.path.join(__snake_case , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )
75
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ : Optional[int] = logging.get_logger(__name__) a_ : Optional[int] = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } a_ : List[Any] = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } a_ : Optional[int] = {"""facebook/blenderbot_small-90M""": 5_12} def a_ ( __snake_case : List[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ =set() lowerCamelCase_ =word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase_ =char lowerCamelCase_ =set(__snake_case ) return pairs class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : Tuple =PRETRAINED_VOCAB_FILES_MAP lowercase : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict =['input_ids', 'attention_mask'] def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase="__start__", lowerCAmelCase="__end__", lowerCAmelCase="__unk__", lowerCAmelCase="__null__", **lowerCAmelCase, ): """simple docstring""" super().__init__(unk_token=lowerCAmelCase, bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, pad_token=lowerCAmelCase, **lowerCAmelCase ) with open(lowerCAmelCase, encoding='''utf-8''' ) as vocab_handle: lowerCamelCase_ =json.load(lowerCAmelCase ) lowerCamelCase_ ={v: k for k, v in self.encoder.items()} with open(lowerCAmelCase, encoding='''utf-8''' ) as 
    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # BPE-encode one (possibly multi-word) token: normalise punctuation
        # and whitespace, then greedily merge the highest-ranked adjacent
        # pair (per self.bpe_ranks) until no known pair remains; sub-words
        # are joined with "@@ " continuation markers.
        # NOTE(review): locals are mangled to lowerCamelCase_; reads such as
        # token, tokens, word, pairs, bigram, first, second, i, j, new_word,
        # words refer to the pre-rewrite names — TODO restore before running.
        if token in self.cache:
            return self.cache[token]
        # Split punctuation off with surrounding spaces, isolate apostrophes,
        # collapse runs of whitespace.
        lowerCamelCase_ =re.sub('''([.,!?()])''', R''' \1''', lowerCAmelCase )
        lowerCamelCase_ =re.sub('''(\')''', R''' \1 ''', lowerCAmelCase )
        lowerCamelCase_ =re.sub(R'''\s{2,}''', ''' ''', lowerCAmelCase )
        if "\n" in token:
            # Newlines are represented by the special __newln__ symbol.
            lowerCamelCase_ =token.replace('''\n''', ''' __newln__''' )
        lowerCamelCase_ =token.split(''' ''' )
        lowerCamelCase_ =[]
        for token in tokens:
            if not len(lowerCAmelCase ):
                continue
            lowerCamelCase_ =token.lower()
            lowerCamelCase_ =tuple(lowerCAmelCase )
            # End-of-word marker "</w>" is appended to the last symbol.
            lowerCamelCase_ =tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            lowerCamelCase_ =get_pairs(lowerCAmelCase )
            if not pairs:
                words.append(lowerCAmelCase )
                continue
            while True:
                # Lowest rank == earliest learned merge == highest priority.
                lowerCamelCase_ =min(lowerCAmelCase, key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase, float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                lowerCamelCase_, lowerCamelCase_ =bigram
                lowerCamelCase_ =[]
                lowerCamelCase_ =0
                while i < len(lowerCAmelCase ):
                    try:
                        # Jump to the next occurrence of the pair's first symbol.
                        lowerCamelCase_ =word.index(lowerCAmelCase, lowerCAmelCase )
                        new_word.extend(word[i:j] )
                        lowerCamelCase_ =j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second:
                        # Merge the pair into a single symbol.
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                lowerCamelCase_ =tuple(lowerCAmelCase )
                lowerCamelCase_ =new_word
                if len(lowerCAmelCase ) == 1:
                    break
                else:
                    lowerCamelCase_ =get_pairs(lowerCAmelCase )
            # Join sub-words with "@@ " and drop the trailing "</w>" marker.
            lowerCamelCase_ ='''@@ '''.join(lowerCAmelCase )
            lowerCamelCase_ =word[:-4]
            # presumably this cached the result (self.cache[token] = word)
            # before mangling — TODO confirm against the upstream source.
            lowerCamelCase_ =word
            words.append(lowerCAmelCase )
        return " ".join(lowerCAmelCase )
return vocab_file, merge_file
75
1
'''Fine-tune OpenAIGPTDoubleHeadsModel on the RocStories cloze task (HF example script).

NOTE(review): identifier obfuscation destroyed every local binding (all assignments
go to `lowerCamelCase_`, call arguments to `__snake_case`), so reads of `outputs`,
`parser`, `args`, `model`, ... reference names that are never bound, and the two
helper functions below declare duplicate `__snake_case` parameters (a SyntaxError).
Tokens are preserved as-is; comments mark the breakages.
'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
a_ : Optional[int] = logging.getLogger(__name__)
# Accuracy helper: argmax over class logits, count matches against labels.
# NOTE(review): duplicate parameter names `__snake_case` — not valid Python.
def a_ ( __snake_case : Union[str, Any] , __snake_case : Union[str, Any] ) -> str:
    """Return the number of rows whose argmax prediction equals the label."""
    lowerCamelCase_ =np.argmax(__snake_case , axis=1 )
    return np.sum(outputs == labels )
# Load the RocStories CSV: (joined story sentences, cont1, cont2, 0-based label).
def a_ ( __snake_case : List[Any] ) -> Tuple:
    """Read a RocStories CSV file into (story, cont1, cont2, label) tuples."""
    with open(__snake_case , encoding='''utf_8''' ) as f:
        lowerCamelCase_ =csv.reader(__snake_case )
        lowerCamelCase_ =[]
        next(__snake_case )  # skip the first line
        for line in tqdm(__snake_case ):
            output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
# Build (input_ids, mc_token_ids, lm_labels, mc_labels) tensors for each dataset.
def a_ ( __snake_case : str , __snake_case : Dict , __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Dict ) -> Dict:
    """Encode each dataset into the four tensors the double-heads model expects."""
    lowerCamelCase_ =[]
    for dataset in encoded_datasets:
        lowerCamelCase_ =len(__snake_case )
        # NOTE(review): `np.intaa` is a garbled dtype name (presumably np.int64).
        lowerCamelCase_ =np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
        lowerCamelCase_ =np.zeros((n_batch, 2) , dtype=np.intaa )
        # LM labels default to -100 so padding positions are ignored by the loss.
        lowerCamelCase_ =np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
        lowerCamelCase_ =np.zeros((n_batch,) , dtype=np.intaa )
        for (
            i,
            (story, conta, conta, mc_label),
        ) in enumerate(__snake_case ):
            # One candidate sequence per continuation: [start] story [delim] cont [clf].
            lowerCamelCase_ =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            lowerCamelCase_ =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            lowerCamelCase_ =with_conta
            lowerCamelCase_ =with_conta
            lowerCamelCase_ =len(__snake_case ) - 1
            lowerCamelCase_ =len(__snake_case ) - 1
            lowerCamelCase_ =with_conta
            lowerCamelCase_ =with_conta
            lowerCamelCase_ =mc_label
        lowerCamelCase_ =(input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(__snake_case ) for t in all_inputs ) )
    return tensor_datasets
def a_ ( ) -> Optional[int]:
    """Entry point: parse args, fine-tune and/or evaluate the double-heads model."""
    lowerCamelCase_ =argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=__snake_case , default='''openai-gpt''' , help='''pretrained model name''' )
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
    parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
    parser.add_argument(
        '''--output_dir''' ,
        default=__snake_case ,
        type=__snake_case ,
        required=__snake_case ,
        help='''The output directory where the model predictions and checkpoints will be written.''' ,
    )
    parser.add_argument('''--train_dataset''' , type=__snake_case , default='''''' )
    parser.add_argument('''--eval_dataset''' , type=__snake_case , default='''''' )
    parser.add_argument('''--seed''' , type=__snake_case , default=42 )
    parser.add_argument('''--num_train_epochs''' , type=__snake_case , default=3 )
    parser.add_argument('''--train_batch_size''' , type=__snake_case , default=8 )
    parser.add_argument('''--eval_batch_size''' , type=__snake_case , default=16 )
    parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__snake_case , help='''Epsilon for Adam optimizer.''' )
    parser.add_argument('''--max_grad_norm''' , type=__snake_case , default=1 )
    parser.add_argument(
        '''--max_steps''' ,
        default=-1 ,
        type=__snake_case ,
        help=(
            '''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
        ) ,
    )
    parser.add_argument(
        '''--gradient_accumulation_steps''' ,
        type=__snake_case ,
        default=1 ,
        help='''Number of updates steps to accumulate before performing a backward/update pass.''' ,
    )
    parser.add_argument('''--learning_rate''' , type=__snake_case , default=6.25e-5 )
    parser.add_argument('''--warmup_steps''' , default=0 , type=__snake_case , help='''Linear warmup over warmup_steps.''' )
    parser.add_argument('''--lr_schedule''' , type=__snake_case , default='''warmup_linear''' )
    parser.add_argument('''--weight_decay''' , type=__snake_case , default=0.0_1 )
    parser.add_argument('''--lm_coef''' , type=__snake_case , default=0.9 )
    parser.add_argument('''--n_valid''' , type=__snake_case , default=374 )
    parser.add_argument('''--server_ip''' , type=__snake_case , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=__snake_case , default='''''' , help='''Can be used for distant debugging.''' )
    lowerCamelCase_ =parser.parse_args()
    print(__snake_case )
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__snake_case )
        ptvsd.wait_for_attach()
    # Seed every RNG for reproducibility.
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )
    lowerCamelCase_ =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    lowerCamelCase_ =torch.cuda.device_count()
    logger.info('''device: {}, n_gpu {}'''.format(__snake_case , __snake_case ) )
    if not args.do_train and not args.do_eval:
        raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )
    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    lowerCamelCase_ =['''_start_''', '''_delimiter_''', '''_classify_''']
    lowerCamelCase_ =OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(__snake_case )
    lowerCamelCase_ =tokenizer.convert_tokens_to_ids(__snake_case )
    lowerCamelCase_ =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(__snake_case ) )
    model.to(__snake_case )
    # Load and encode the datasets
    def tokenize_and_encode(__snake_case : Union[str, Any] ):
        # Recursively tokenize strings, pass ints through, map over containers.
        if isinstance(__snake_case , __snake_case ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__snake_case ) )
        elif isinstance(__snake_case , __snake_case ):
            return obj
        return [tokenize_and_encode(__snake_case ) for o in obj]

    logger.info('''Encoding dataset...''' )
    lowerCamelCase_ =load_rocstories_dataset(args.train_dataset )
    lowerCamelCase_ =load_rocstories_dataset(args.eval_dataset )
    lowerCamelCase_ =(train_dataset, eval_dataset)
    lowerCamelCase_ =tokenize_and_encode(__snake_case )
    # Compute the max input length for the Transformer
    lowerCamelCase_ =model.config.n_positions // 2 - 2
    lowerCamelCase_ =max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset )
    lowerCamelCase_ =min(__snake_case , model.config.n_positions )  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    lowerCamelCase_ =pre_process_datasets(__snake_case , __snake_case , __snake_case , *__snake_case )
    lowerCamelCase_, lowerCamelCase_ =tensor_datasets[0], tensor_datasets[1]
    lowerCamelCase_ =TensorDataset(*__snake_case )
    lowerCamelCase_ =RandomSampler(__snake_case )
    lowerCamelCase_ =DataLoader(__snake_case , sampler=__snake_case , batch_size=args.train_batch_size )
    lowerCamelCase_ =TensorDataset(*__snake_case )
    lowerCamelCase_ =SequentialSampler(__snake_case )
    lowerCamelCase_ =DataLoader(__snake_case , sampler=__snake_case , batch_size=args.eval_batch_size )
    # Prepare optimizer: no weight decay on biases and LayerNorm parameters.
    if args.do_train:
        if args.max_steps > 0:
            lowerCamelCase_ =args.max_steps
            lowerCamelCase_ =args.max_steps // (len(__snake_case ) // args.gradient_accumulation_steps) + 1
        else:
            lowerCamelCase_ =len(__snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs
        lowerCamelCase_ =list(model.named_parameters() )
        lowerCamelCase_ =['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
        lowerCamelCase_ =[
            {
                '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                '''weight_decay''': args.weight_decay,
            },
            {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
        ]
        lowerCamelCase_ =AdamW(__snake_case , lr=args.learning_rate , eps=args.adam_epsilon )
        lowerCamelCase_ =get_linear_schedule_with_warmup(
            __snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=__snake_case )
    if args.do_train:
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
            lowerCamelCase_ =0
            lowerCamelCase_ =0
            lowerCamelCase_ =tqdm(__snake_case , desc='''Training''' )
            for step, batch in enumerate(__snake_case ):
                lowerCamelCase_ =tuple(t.to(__snake_case ) for t in batch )
                lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =batch
                lowerCamelCase_ =model(__snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case )
                # Combined loss: LM loss weighted by lm_coef plus multiple-choice loss.
                lowerCamelCase_ =args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss for the progress bar.
                lowerCamelCase_ =(
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                lowerCamelCase_ ='''Training loss: {:.2e} lr: {:.2e}'''.format(__snake_case , scheduler.get_lr()[0] )
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        lowerCamelCase_ =model.module if hasattr(__snake_case , '''module''' ) else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        # NOTE(review): the two join() targets were presumably WEIGHTS_NAME and
        # CONFIG_NAME before obfuscation — confirm against the original example.
        lowerCamelCase_ =os.path.join(args.output_dir , __snake_case )
        lowerCamelCase_ =os.path.join(args.output_dir , __snake_case )
        torch.save(model_to_save.state_dict() , __snake_case )
        model_to_save.config.to_json_file(__snake_case )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        lowerCamelCase_ =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        lowerCamelCase_ =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(__snake_case )
    if args.do_eval:
        model.eval()
        lowerCamelCase_, lowerCamelCase_ =0, 0
        lowerCamelCase_, lowerCamelCase_ =0, 0
        for batch in tqdm(__snake_case , desc='''Evaluating''' ):
            lowerCamelCase_ =tuple(t.to(__snake_case ) for t in batch )
            lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =batch
            with torch.no_grad():
                lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =model(
                    __snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case )
            lowerCamelCase_ =mc_logits.detach().cpu().numpy()
            lowerCamelCase_ =mc_labels.to('''cpu''' ).numpy()
            lowerCamelCase_ =accuracy(__snake_case , __snake_case )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        lowerCamelCase_ =eval_loss / nb_eval_steps
        lowerCamelCase_ =eval_accuracy / nb_eval_examples
        lowerCamelCase_ =tr_loss / nb_tr_steps if args.do_train else None
        lowerCamelCase_ ={'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
        lowerCamelCase_ =os.path.join(args.output_dir , '''eval_results.txt''' )
        with open(__snake_case , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key in sorted(result.keys() ):
                logger.info(''' %s = %s''' , __snake_case , str(result[key] ) )
                writer.write('''%s = %s\n''' % (key, str(result[key] )) )
# NOTE(review): `main` is undefined — the entry function above was renamed to `a_`.
if __name__ == "__main__":
    main()
75
'''EfficientFormer model configuration (obfuscated copy).

NOTE(review): the class below inherits from `lowerCamelCase__` (garbled; presumably
PretrainedConfig) and its __init__ declares the SAME parameter name `lowerCAmelCase`
for every argument — duplicate parameter names are a SyntaxError, so this cannot be
imported as-is. The assignment targets on the right of each `=` reveal the intended
parameter for each position.
'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config URL.
a_ : Any = {
    """snap-research/efficientformer-l1-300""": (
        """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
    ),
}
class __UpperCamelCase ( lowerCamelCase__ ):
    # model_type identifier used by AutoConfig dispatch.
    lowercase : List[str] ='efficientformer'
    # NOTE(review): every parameter is named `lowerCAmelCase` — invalid Python.
    # Positional defaults (in order): depths, hidden_sizes, downsamples, dim,
    # key_dim, attention_ratio, resolution, num_hidden_layers, num_attention_heads,
    # mlp_expansion_ratio, hidden_dropout_prob, patch_size, num_channels, pool_size,
    # downsample_patch_size, downsample_stride, downsample_pad, drop_path_rate,
    # num_meta3d_blocks, distillation, use_layer_scale, layer_scale_init_value,
    # hidden_act, initializer_range, layer_norm_eps, image_size, batch_norm_eps
    # (inferred from the assignment targets below — confirm against upstream).
    def __init__( self, lowerCAmelCase = [3, 2, 6, 4], lowerCAmelCase = [48, 96, 224, 448], lowerCAmelCase = [True, True, True, True], lowerCAmelCase = 448, lowerCAmelCase = 32, lowerCAmelCase = 4, lowerCAmelCase = 7, lowerCAmelCase = 5, lowerCAmelCase = 8, lowerCAmelCase = 4, lowerCAmelCase = 0.0, lowerCAmelCase = 16, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 2, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = 1, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = 1e-5, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_2, lowerCAmelCase = 1e-12, lowerCAmelCase = 224, lowerCAmelCase = 1e-05, **lowerCAmelCase, ):
        """Store all EfficientFormer hyper-parameters on the config instance."""
        super().__init__(**lowerCAmelCase )
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =hidden_sizes
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =patch_size
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =depths
        lowerCamelCase_ =mlp_expansion_ratio
        lowerCamelCase_ =downsamples
        lowerCamelCase_ =dim
        lowerCamelCase_ =key_dim
        lowerCamelCase_ =attention_ratio
        lowerCamelCase_ =resolution
        lowerCamelCase_ =pool_size
        lowerCamelCase_ =downsample_patch_size
        lowerCamelCase_ =downsample_stride
        lowerCamelCase_ =downsample_pad
        lowerCamelCase_ =drop_path_rate
        lowerCamelCase_ =num_metaad_blocks
        lowerCamelCase_ =distillation
        lowerCamelCase_ =use_layer_scale
        lowerCamelCase_ =layer_scale_init_value
        lowerCamelCase_ =image_size
        lowerCamelCase_ =batch_norm_eps
75
1
'''Project Euler problem 7: find the n-th prime number (default: the 10001st).

Fixes over the obfuscated original: both functions were bound to the same name
`a_`, and their bodies read names (`is_prime`, `primes`, `num`, `nth`, `solution`)
that were never bound — a NameError at runtime. Names are restored from the read
sites, and the redundant if/else (both branches incremented the counter) is
simplified.
'''
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime.

    Uses the 6k +/- 1 optimization: after handling 2, 3 and trivial
    non-primes, only candidate divisors of the form 6k-1 / 6k+1 up to
    sqrt(number) need to be tested.
    """
    if 1 < number < 4:
        # 2 and 3 are primes.
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers and multiples of 3 are not prime.
        return False
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number (1-indexed: solution(1) == 2).

    Raises:
        TypeError: if `nth` cannot be converted to int.
        ValueError: if `nth` is not >= 1.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1  # advance regardless of primality (was duplicated in if/else)
    return primes[-1]


# Preserve the module-level name the obfuscated original last bound.
a_ = solution

if __name__ == "__main__":
    print(f"""{solution() = }""")
75
'''Unit tests for the Speech2Text feature extractor (obfuscated copy).

NOTE(review): same obfuscation damage as elsewhere in this file — locals assigned
to `lowerCamelCase_` never bind the names later read (`rng`, `values`,
`speech_inputs`, `feature_extractor`, ...), and the tester __init__ declares
duplicate `lowerCAmelCase` parameters (SyntaxError). Tokens preserved as-is.
'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import SpeechaTextFeatureExtractor
a_ : Union[str, Any] = random.Random()
# Generate a (shape[0] x shape[1]) nested list of random floats scaled by `scale`.
def a_ ( __snake_case : int , __snake_case : int=1.0 , __snake_case : Tuple=None , __snake_case : Union[str, Any]=None ) -> str:
    """Create a batch of random float lists for use as fake audio input."""
    if rng is None:
        lowerCamelCase_ =global_rng
    lowerCamelCase_ =[]
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class __UpperCamelCase ( unittest.TestCase ):
    # Test helper that holds feature-extractor hyper-parameters and builds inputs.
    # NOTE(review): duplicate `lowerCAmelCase` parameter names — invalid Python.
    # Positional intent (from assignments): parent, batch_size=7, min_seq_length=400,
    # max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0,
    # sampling_rate=16000, return_attention_mask=True, do_normalize=True.
    def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=400, lowerCAmelCase=2_000, lowerCAmelCase=24, lowerCAmelCase=24, lowerCAmelCase=0.0, lowerCAmelCase=16_000, lowerCAmelCase=True, lowerCAmelCase=True, ):
        """Store tester hyper-parameters."""
        lowerCamelCase_ =parent
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =min_seq_length
        lowerCamelCase_ =max_seq_length
        lowerCamelCase_ =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        lowerCamelCase_ =feature_size
        lowerCamelCase_ =num_mel_bins
        lowerCamelCase_ =padding_value
        lowerCamelCase_ =sampling_rate
        lowerCamelCase_ =return_attention_mask
        lowerCamelCase_ =do_normalize
    def lowercase__ ( self ):
        """Return the kwargs dict used to construct the feature extractor."""
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def lowercase__ ( self, lowerCAmelCase=False, lowerCAmelCase=False ):
        """Build fake speech inputs of equal or increasing length; optionally as ndarrays."""
        def _flatten(lowerCAmelCase ):
            return list(itertools.chain(*lowerCAmelCase ) )
        if equal_length:
            lowerCamelCase_ =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            lowerCamelCase_ =[
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
            ]
        if numpify:
            lowerCamelCase_ =[np.asarray(lowerCAmelCase ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
    # The class under test (None when torchaudio/speech extras are unavailable).
    lowercase : Any =SpeechaTextFeatureExtractor if is_speech_available() else None
    def lowercase__ ( self ):
        """Create the tester fixture."""
        lowerCamelCase_ =SpeechaTextFeatureExtractionTester(self )
    def lowercase__ ( self, lowerCAmelCase ):
        """Assert features are (approximately) zero-mean / unit-variance per bin."""
        self.assertTrue(np.all(np.mean(lowerCAmelCase, axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase, axis=0 ) - 1 ) < 1e-3 ) )
    def lowercase__ ( self ):
        """Test __call__ on single, batched and 2-D numpy inputs."""
        lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
        lowerCamelCase_ =[np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
        # Test feature size
        lowerCamelCase_ =feature_extractor(lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''np''' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
        # Test not batched input
        lowerCamelCase_ =feature_extractor(speech_inputs[0], return_tensors='''np''' ).input_features
        lowerCamelCase_ =feature_extractor(np_speech_inputs[0], return_tensors='''np''' ).input_features
        self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
        # Test batched
        lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features
        lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase, lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        lowerCamelCase_ =[floats_list((1, x) )[0] for x in (800, 800, 800)]
        lowerCamelCase_ =np.asarray(lowerCAmelCase )
        lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features
        lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase, lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
    def lowercase__ ( self ):
        """Test cepstral mean/variance normalization for several padding modes."""
        lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
        lowerCamelCase_ =['''longest''', '''max_length''', '''do_not_pad''']
        lowerCamelCase_ =[None, 16, None]
        for max_length, padding in zip(lowerCAmelCase, lowerCAmelCase ):
            lowerCamelCase_ =feature_extractor(
                lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_attention_mask=lowerCAmelCase )
            lowerCamelCase_ =inputs.input_features
            lowerCamelCase_ =inputs.attention_mask
            lowerCamelCase_ =[np.sum(lowerCAmelCase ) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def lowercase__ ( self ):
        """Test normalization when returning numpy tensors with padding."""
        lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
        lowerCamelCase_ =['''longest''', '''max_length''', '''do_not_pad''']
        lowerCamelCase_ =[None, 16, None]
        for max_length, padding in zip(lowerCAmelCase, lowerCAmelCase ):
            lowerCamelCase_ =feature_extractor(
                lowerCAmelCase, max_length=lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase )
            lowerCamelCase_ =inputs.input_features
            lowerCamelCase_ =inputs.attention_mask
            lowerCamelCase_ =[np.sum(lowerCAmelCase ) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def lowercase__ ( self ):
        """Test normalization with max_length padding and truncation."""
        lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
        lowerCamelCase_ =feature_extractor(
            lowerCAmelCase, padding='''max_length''', max_length=4, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, )
        lowerCamelCase_ =inputs.input_features
        lowerCamelCase_ =inputs.attention_mask
        lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1] )
        self._check_zero_mean_unit_variance(input_features[2] )
    def lowercase__ ( self ):
        """Test normalization with longest padding plus truncation and pad-to-max."""
        lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
        lowerCamelCase_ =feature_extractor(
            lowerCAmelCase, padding='''longest''', max_length=4, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, )
        lowerCamelCase_ =inputs.input_features
        lowerCamelCase_ =inputs.attention_mask
        lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24) )
        lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
        lowerCamelCase_ =feature_extractor(
            lowerCAmelCase, padding='''longest''', max_length=16, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, )
        lowerCamelCase_ =inputs.input_features
        lowerCamelCase_ =inputs.attention_mask
        lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24) )
    def lowercase__ ( self ):
        """Test that padding preserves double precision for numpy and torch tensors."""
        import torch
        lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCamelCase_ =np.random.rand(100, 32 ).astype(np.floataa )
        lowerCamelCase_ =np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            lowerCamelCase_ =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            lowerCamelCase_ =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def lowercase__ ( self, lowerCAmelCase ):
        """Load `num_samples` audio arrays from the dummy LibriSpeech dataset."""
        from datasets import load_dataset
        lowerCamelCase_ =load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' )
        # automatic decoding with librispeech
        lowerCamelCase_ =ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def lowercase__ ( self ):
        """Integration test: compare extracted features against golden values."""
        lowerCamelCase_ =np.array([
            -1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1, -1.2_3_1_0, -0.8_0_9_8,
            -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8, -1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9,
            -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5,
        ] )
        # fmt: on
        lowerCamelCase_ =self._load_datasamples(1 )
        lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''pt''' ).input_features
        self.assertEquals(input_features.shape, (1, 584, 24) )
        self.assertTrue(np.allclose(input_features[0, 0, :30], lowerCAmelCase, atol=1e-4 ) )
75
1
'''DPR (Dense Passage Retrieval) model configuration (obfuscated copy).

NOTE(review): the class inherits from `lowerCamelCase__` (garbled; presumably
PretrainedConfig) and its __init__ declares duplicate `lowerCAmelCase` parameter
names — a SyntaxError. Intended parameters are inferred from the assignment
targets below.
'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
# Map of pretrained DPR checkpoint name -> hosted config URL.
a_ : Tuple = {
    """facebook/dpr-ctx_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-ctx_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-multiset-base""": (
        """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
    ),
}
class __UpperCamelCase ( lowerCamelCase__ ):
    # model_type identifier used by AutoConfig dispatch.
    lowercase : Tuple ='dpr'
    # NOTE(review): duplicate `lowerCAmelCase` parameter names — invalid Python.
    # Positional defaults map (from assignments): vocab_size=30522, hidden_size=768,
    # num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
    # hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
    # max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02,
    # layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute",
    # projection_dim=0 (inferred — confirm against upstream).
    def __init__( self, lowerCAmelCase=30_522, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-12, lowerCAmelCase=0, lowerCAmelCase="absolute", lowerCAmelCase = 0, **lowerCAmelCase, ):
        """Store all DPR encoder hyper-parameters on the config instance."""
        super().__init__(pad_token_id=lowerCAmelCase, **lowerCAmelCase )
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =type_vocab_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =projection_dim
        lowerCamelCase_ =position_embedding_type
75
'''Simplified DES (S-DES) — educational 8-bit block cipher.

Fixes over the obfuscated original: all five helpers were bound to the same name
`a_` with duplicate `__snake_case` parameters (a SyntaxError), and their bodies
read locals (`res`, `row`, `col`, `temp`, ...) that were never bound. Names are
restored from the read sites and from the script's own call sites
(`apply_table`, `left_shift`, `function`). The permutation/S-box tables are
hoisted to module level so `function` (which reads the global `p4_table`) also
works when the module is imported, not only when run as a script.
'''


def apply_table(inp, table):
    """Permute/select bits of `inp` according to `table` (1-based indices)."""
    return "".join(inp[i - 1] for i in table)


def left_shift(data):
    """Circularly rotate the bit-string `data` one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings, returned as a bit-string."""
    return "".join("0" if x == y else "1" for x, y in zip(a, b))


def apply_sbox(s, data):
    """Look up a 4-bit string in S-box `s`; outer bits select the row, inner the column.

    Returns the binary representation (no padding) of the selected entry.
    """
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, sa, sa_, key, message):
    """One Feistel round: expand/permute the right half, XOR with the round key,
    substitute through the two S-boxes, permute with P4 and XOR into the left half.

    Returns the new 8-bit state (modified left half + unchanged right half).
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sa_, temp[4:])
    # Zero-pad each S-box output to 2 bits before recombining.
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, pa_table)
    temp = xor(left, temp)
    return temp + right


# Permutation tables and S-boxes (module level so `function` works on import).
paa_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]  # P10 (key schedule)
pa_table_key = [6, 3, 7, 4, 8, 5, 10, 9]  # P8  (round-key selection)
pa_table = [2, 4, 3, 1]  # P4  (round function)
IP = [2, 6, 3, 1, 4, 8, 5, 7]
IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
expansion = [4, 1, 2, 3, 2, 3, 4, 1]
sa = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]  # S0
sa_ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]  # S1

if __name__ == "__main__":
    key = input("""Enter 10 bit key: """)
    message = input("""Enter 8 bit message: """)

    # Key generation: P10, split, rotate once -> K1; rotate twice more -> K2.
    temp = apply_table(key, paa_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    keya = apply_table(left + right, pa_table_key)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    keya_second = apply_table(left + right, pa_table_key)

    # Encryption: IP, round with K1, swap halves, round with K2, inverse IP.
    temp = apply_table(message, IP)
    temp = function(expansion, sa, sa_, keya, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sa_, keya_second, temp)
    CT = apply_table(temp, IP_inv)
    print("""Cipher text is:""", CT)

    # Decryption: same structure with the round keys applied in reverse order.
    temp = apply_table(CT, IP)
    temp = function(expansion, sa, sa_, keya_second, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sa_, keya, temp)
    PT = apply_table(temp, IP_inv)
    print("""Plain text after decypting is:""", PT)
75
1
'''Lazy-import __init__ for the HerBERT tokenizers (obfuscated copy).

NOTE(review): obfuscation replaced the `_import_structure` update targets with
`a_`, so `a_ = ["HerbertTokenizerFast"]` no longer extends `_import_structure`
(it presumably was `_import_structure["tokenization_herbert_fast"] = [...]`),
and `_import_structure` passed to _LazyModule below is never defined — confirm
against the upstream module before relying on this file.
'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
# Base import structure: slow tokenizer is always available.
a_ : str = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
    # Fast tokenizer requires the `tokenizers` package.
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Optional[int] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .tokenization_herbert import HerbertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast
else:
    # At runtime, replace this module with a lazy loader.
    import sys
    a_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
75
"""AutoFeatureExtractor: resolve and instantiate the right feature extractor for a checkpoint."""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

logger = logging.get_logger(__name__)

# model_type -> name of the feature extractor class that handles it.
# Fix: the obfuscated source bound this (and the mapping below) to `a_`,
# leaving `FEATURE_EXTRACTOR_MAPPING_NAMES` undefined where referenced.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    """Return the feature extractor class named `class_name`, or None if unknown.

    Fix: the obfuscated source named this function `a_` (shadowed by the next
    `a_`), so the call inside `from_pretrained` raised NameError.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                # Mapping entry exists but the class is absent from that
                # module (e.g. missing optional backend); keep searching.
                continue

    # Classes registered at runtime via `AutoFeatureExtractor.register`.
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature-extractor configuration dict from a local dir or the Hub.

    Returns an empty dict when no feature-extractor config file can be found,
    so the caller can fall back to the model config.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    """Factory that instantiates the correct feature extractor via `from_pretrained`."""

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the feature extractor class matching the checkpoint.

        Resolution order: `feature_extractor_type` / `auto_map` in the feature
        extractor config, then the model config, then FEATURE_EXTRACTOR_MAPPING.
        Raises ValueError when nothing matches.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            # `code_revision` is only meaningful for the dynamic-module fetch above.
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
75
1
"""AutoFeatureExtractor: resolve and instantiate the right feature extractor for a checkpoint."""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

logger = logging.get_logger(__name__)

# model_type -> name of the feature extractor class that handles it.
# Fix: the obfuscated source bound this (and the mapping below) to `a_`,
# leaving `FEATURE_EXTRACTOR_MAPPING_NAMES` undefined where referenced.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    """Return the feature extractor class named `class_name`, or None if unknown.

    Fix: the obfuscated source named this function `a_` (shadowed by the next
    `a_`), so the call inside `from_pretrained` raised NameError.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                # Mapping entry exists but the class is absent from that
                # module (e.g. missing optional backend); keep searching.
                continue

    # Classes registered at runtime via `AutoFeatureExtractor.register`.
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature-extractor configuration dict from a local dir or the Hub.

    Returns an empty dict when no feature-extractor config file can be found,
    so the caller can fall back to the model config.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    """Factory that instantiates the correct feature extractor via `from_pretrained`."""

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the feature extractor class matching the checkpoint.

        Resolution order: `feature_extractor_type` / `auto_map` in the feature
        extractor config, then the model config, then FEATURE_EXTRACTOR_MAPPING.
        Raises ValueError when nothing matches.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            # `code_revision` is only meaningful for the dynamic-module fetch above.
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
75
"""Fine-tune OpenAI GPT (double-heads model) on the ROCStories cloze task."""
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    """Count how many argmax predictions in `out` match `labels`."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Read a ROCStories CSV into tuples (story, cont1, cont2, label_index)."""
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            # Columns 1-4 are the story sentences; 5/6 the candidate endings;
            # the last column is the 1-based correct-ending label.
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pack token-id datasets into fixed-size tensors.

    Each example produces two sequences of shape (2, input_len):
        input_ids[i, k] = [start, story, delimiter, cont_k, clf]
    lm_labels mirrors input_ids (padding is -100, ignored by the LM loss),
    mc_token_ids points at the classification token, mc_labels is the
    correct-continuation index.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        # Fix: the obfuscated source tensored the wrong variable here
        # (`torch.tensor(dataset)` instead of each array `t`).
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    """Parse CLI args, then optionally train and/or evaluate the model."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Seed everything for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Recursively tokenize strings; pass ints through unchanged."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                # Weighted sum of the LM loss and the multiple-choice loss.
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
75
1
"""Zero-shot audio classification pipeline: scores an audio clip against free-form labels."""
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline

logger = logging.get_logger(__name__)


# Fix: the obfuscated source decorated with / inherited from an undefined
# `lowerCamelCase__` (should be PIPELINE_INIT_ARGS / Pipeline) and named all
# four protocol methods `lowercase__`, so only the last one survived and the
# Pipeline machinery could not find `_sanitize_parameters`/`preprocess`/
# `_forward`/`postprocess`.
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """Pipeline matching a single-channel audio array against candidate labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        """Classify the audio(s) given as inputs; see `preprocess` for accepted forms."""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Split caller kwargs into (preprocess, forward, postprocess) params.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        """Accept a URL, file path, raw bytes, or 1-D numpy array and build model inputs."""
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """Turn per-label logits into a score-sorted list of {score, label} dicts."""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        # Fix: the obfuscated lambda bound `lowerCAmelCase` but used `x`.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
75
"""Constant (histogram-based) stretch of a grayscale image."""
import copy
import os

# Fix: the obfuscated source imported a nonexistent module `cva`;
# the calls below are the OpenCV API, so the import must be `cv2`.
import cv2
import numpy as np
from matplotlib import pyplot as plt


# Fix: the class was defined as `__UpperCamelCase` but instantiated below as
# `ConstantStretch()`, and all three methods shared the name `lowercase__`
# (only the last survived). Restore the names the main guard actually calls.
class ConstantStretch:
    """Remap gray levels via the image's cumulative histogram."""

    def __init__(self):
        self.img = ""  # grayscale image being stretched (ndarray after `stretch`)
        self.original_image = ""  # untouched copy, kept for display
        self.last_list = []  # per-intensity remapping table built in `stretch`
        self.rem = 0  # rounding remainder used when quantizing levels
        self.L = 256  # number of gray levels
        self.sk = 0  # running cumulative probability
        self.k = 0  # total histogram mass (pixel count)
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Read `input_image` as grayscale, stretch it, and write output_data/output.jpg."""
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            # Round up when the remainder crosses 0.5.
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        # Loop-invariant: compute the image dimensions once, not per gray level.
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        """Plot the histogram of the (stretched) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display input and output images for 5 seconds."""
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
75
1
"""Convert a T5x Pix2Struct checkpoint into the Hugging Face format."""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints

from transformers import (
    AutoTokenizer,
    PixaStructConfig,
    PixaStructForConditionalGeneration,
    PixaStructImageProcessor,
    PixaStructProcessor,
    PixaStructTextConfig,
    PixaStructVisionConfig,
)


def get_flax_param(tax_checkpoint_path):
    """Load a T5x checkpoint and return its parameters as a flat dict.

    Keys of the returned dict are tuples of path components, e.g.
    ``("target", "encoder", ...)``.
    """
    flax_params = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    return flatten_dict(flax_params)


def rename_and_convert_flax_params(flax_dict):
    """Rename flax parameter keys to their HF names and convert values to torch tensors."""
    converted_dict = {}
    conversion_mapping = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    decoder_conversion_mapping = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first ("target") prefix from the tuple key
            new_key = ".".join(key[1:])

            # rename the key through the generic mapping first
            for old, new in conversion_mapping.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in decoder_conversion_mapping.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    # convert converted_dict into torch format; weight matrices (everything
    # except embeddings) are stored transposed in the flax checkpoint
    converted_torch_dict = {}
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict


def convert_pixastruct_original_pytorch_checkpoint_to_hf(
    tax_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Convert a checkpoint, build the matching processor, and save both.

    Args:
        tax_checkpoint_path: path to the original T5x checkpoint.
        pytorch_dump_folder_path: output directory for the HF model/processor.
        use_large: build the "large" configuration instead of the base one.
        is_vqa: mark the resulting config as a VQA model.
    """
    flax_params = get_flax_param(tax_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)

    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )
    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE(review): the attribute targets of these two assignments were lost
        # in the source; confirm against the upstream conversion script.
        image_processor.max_patches = 4096
        image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()

    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        # BUG FIX: argparse stores the flag under ``t5x_checkpoint_path``; the
        # old code read ``args.tax_checkpoint_path`` (AttributeError at runtime).
        args.t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.use_large,
        args.is_vqa,
    )
75
"""Zero-shot audio classification pipeline (CLAP-style audio/text matching)."""
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """Classify raw audio against free-form candidate labels via an audio-text model."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        """Score the given audio(s) against ``candidate_labels`` supplied in kwargs."""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Split user kwargs into (preprocess, forward, postprocess) parameter dicts.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        """Load/validate the audio, extract features, and tokenize one hypothesis per label."""
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**model_inputs, **text_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """Turn per-label logits into a score-sorted list of {score, label} dicts."""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        # BUG FIX: the previous sort key was ``lambda lowerCAmelCase: -x[0]``,
        # whose body referenced an undefined name ``x`` (NameError at runtime).
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
75
1
"""Directed graph stored as an adjacency list, with recursive depth-first search."""


class Graph:
    def __init__(self):
        # adjacency list: vertex -> list of direct successors
        self.vertex = {}

    def print_graph(self):
        """Print the raw adjacency dict, then one ``u -> v -> w`` line per vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """Add a directed edge ``from_vertex -> to_vertex``."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # first edge out of this vertex: create its adjacency list
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Run DFS from every unvisited vertex; vertices must be labelled 0..n-1."""
        visited = [False] * len(self.vertex)
        # call the recursive helper for each still-unvisited component
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        """Visit ``start_vertex`` and recurse into its unvisited *neighbours*.

        The previous version recursed over every vertex of the graph, which
        merely printed the vertices in key order instead of performing a true
        depth-first traversal along edges.
        """
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # vertices without outgoing edges have no adjacency entry -> default to []
        for neighbour in self.vertex.get(start_vertex, []):
            if not visited[neighbour]:
                self.dfs_recursive(neighbour, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
75
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : str = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp a_ : Optional[Any] = 5 a_ : str = 10 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : int =SpeechaTextTokenizer lowercase : int =False lowercase : List[str] =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =sp.SentencePieceProcessor() spm_model.Load(lowerCAmelCase ) lowerCamelCase_ =['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCAmelCase ) )] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ =Path(self.tmpdirname ) save_json(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''<pad>''' lowerCamelCase_ =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ), lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ), lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =list(self.get_tokenizer().get_vocab().keys() ) 
self.assertEqual(vocab_keys[0], '''<s>''' ) self.assertEqual(vocab_keys[1], '''<pad>''' ) self.assertEqual(vocab_keys[-1], '''j''' ) self.assertEqual(len(lowerCAmelCase ), 1_001 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size, 1_001 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCamelCase_ =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [289, 50, 14, 174, 386], ) lowerCamelCase_ =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], ) lowerCamelCase_ =tokenizer.convert_tokens_to_ids(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCamelCase_ =tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 
142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase, model_name='''facebook/s2t-small-mustc-en-de-st''', revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''', ) @require_sentencepiece class __UpperCamelCase ( 
unittest.TestCase ): lowercase : Tuple ='valhalla/s2t_mustc_multilinguial_medium' lowercase : Dict ='C\'est trop cool' lowercase : str ='Esto es genial' @classmethod def lowercase__ ( cls ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 11 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size, 10_000 ) def lowercase__ ( self ): """simple docstring""" self.assertIn(lowerCAmelCase, self.tokenizer.all_special_ids ) lowerCamelCase_ =[ES_CODE, 4, 1_601, 47, 7_647, 2] lowerCamelCase_ =self.tokenizer.decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' lowerCamelCase_ =self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0], lowerCAmelCase ) self.assertEqual(encoded[-1], self.tokenizer.eos_token_id ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE] ) lowerCamelCase_ ='''es''' self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE] )
75
1
"""Lazy import structure for the LXMERT model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Base import structure: symbols that are always importable.
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}

# Fast tokenizer requires the `tokenizers` library.
# BUG FIX: these optional symbol lists were previously assigned to throwaway
# module-level names instead of being merged into ``_import_structure``, and
# the final ``_LazyModule`` call referenced an undefined ``_import_structure``.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first
    # attribute access; the proxy must be installed in ``sys.modules`` to take effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def a_ ( ) -> Dict: """simple docstring""" lowerCamelCase_ ='''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg''' lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' ) return image def a_ ( __snake_case : Tuple ) -> Dict: """simple docstring""" lowerCamelCase_ =[] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): 
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') ) # fmt: on return rename_keys def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ =dct.pop(__snake_case ) lowerCamelCase_ =val def a_ ( __snake_case : str , __snake_case : Optional[Any] ) -> Any: """simple docstring""" for i in 
range(config.vision_config.num_hidden_layers ): # read in original q and v biases lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict lowerCamelCase_ =torch.cat((q_bias, torch.zeros_like(__snake_case , requires_grad=__snake_case ), v_bias) ) lowerCamelCase_ =qkv_bias def a_ ( __snake_case : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ =364 if '''coco''' in model_name else 224 lowerCamelCase_ =InstructBlipVisionConfig(image_size=__snake_case ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2001 ).to_dict() elif "vicuna-13b" in model_name: lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2001 ).to_dict() else: raise ValueError('''Model name not supported''' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 lowerCamelCase_ =InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict() lowerCamelCase_ =InstructBlipConfig(vision_config=__snake_case , text_config=__snake_case , qformer_config=__snake_case ) return config, image_size @torch.no_grad() def a_ ( __snake_case : Optional[int] , __snake_case : str=None , __snake_case : List[Any]=False ) -> List[str]: """simple docstring""" lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' ) 
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} ) if "t5" in model_name: lowerCamelCase_ =TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) lowerCamelCase_ =LlamaTokenizerFast.from_pretrained( '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' ) tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} ) lowerCamelCase_, lowerCamelCase_ =get_blipa_config(__snake_case ) lowerCamelCase_ =InstructBlipForConditionalGeneration(__snake_case ).eval() lowerCamelCase_ ={ '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''), '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''), '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''), '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''), } lowerCamelCase_, lowerCamelCase_ =model_name_to_original[model_name] # load original model print('''Loading original model...''' ) lowerCamelCase_ ='''cuda:1''' if torch.cuda.is_available() else '''cpu''' lowerCamelCase_ ='''cuda:2''' if torch.cuda.is_available() else '''cpu''' lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =load_model_and_preprocess( name=__snake_case , model_type=__snake_case , is_eval=__snake_case , device=__snake_case ) original_model.eval() print('''Done!''' ) # update state dict keys lowerCamelCase_ =original_model.state_dict() lowerCamelCase_ =create_rename_keys(__snake_case ) for src, dest in rename_keys: rename_key(__snake_case , __snake_case , __snake_case ) # some 
keys can be renamed efficiently for key, val in state_dict.copy().items(): lowerCamelCase_ =state_dict.pop(__snake_case ) if key.startswith('''Qformer.bert''' ): lowerCamelCase_ =key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: lowerCamelCase_ =key.replace('''self''' , '''attention''' ) if "llm_proj" in key: lowerCamelCase_ =key.replace('''llm_proj''' , '''language_projection''' ) if "t5_proj" in key: lowerCamelCase_ =key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''llm_model''' ): lowerCamelCase_ =key.replace('''llm_model''' , '''language_model''' ) if key.startswith('''t5''' ): lowerCamelCase_ =key.replace('''t5''' , '''language''' ) lowerCamelCase_ =val # read in qv biases read_in_q_v_bias(__snake_case , __snake_case ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__snake_case , strict=__snake_case ) lowerCamelCase_ =load_demo_image() lowerCamelCase_ ='''What is unusual about this image?''' # create processor lowerCamelCase_ =BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=__snake_case , image_std=__snake_case ) lowerCamelCase_ =InstructBlipProcessor( image_processor=__snake_case , tokenizer=__snake_case , qformer_tokenizer=__snake_case , ) lowerCamelCase_ =processor(images=__snake_case , text=__snake_case , return_tensors='''pt''' ).to(__snake_case ) # make sure processor creates exact same pixel values lowerCamelCase_ =vis_processors['''eval'''](__snake_case ).unsqueeze(0 ).to(__snake_case ) lowerCamelCase_ =inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __snake_case ) original_model.to(__snake_case ) hf_model.to(__snake_case ) with torch.no_grad(): if "vicuna" in model_name: lowerCamelCase_ =original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits lowerCamelCase_ =hf_model(**__snake_case ).logits else: lowerCamelCase_ =original_model( {'''image''': 
original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits lowerCamelCase_ =tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(__snake_case ) lowerCamelCase_ =label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) lowerCamelCase_ =hf_model(**__snake_case , labels=__snake_case ).logits print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape lowerCamelCase_ =1e-4 if '''vicuna''' in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __snake_case , atol=__snake_case ) print('''Looks ok!''' ) print('''Generating with original model...''' ) lowerCamelCase_ =original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('''Generating with HF model...''' ) lowerCamelCase_ =hf_model.generate( **__snake_case , do_sample=__snake_case , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
lowerCamelCase_ =2 print('''Original generation:''' , __snake_case ) lowerCamelCase_ =processor.batch_decode(__snake_case , skip_special_tokens=__snake_case ) lowerCamelCase_ =[text.strip() for text in output_text] print('''HF generation:''' , __snake_case ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__snake_case ) hf_model.save_pretrained(__snake_case ) if push_to_hub: processor.push_to_hub(F'''Salesforce/{model_name}''' ) hf_model.push_to_hub(F'''Salesforce/{model_name}''' ) if __name__ == "__main__": a_ : Any = argparse.ArgumentParser() a_ : Any = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) a_ : str = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
75
1
"""Hill cipher over the 36-character alphabet A-Z0-9.

Encryption multiplies column vectors of plaintext indices by an integer key
matrix modulo 36; decryption uses the modular inverse of that matrix, so the
key determinant must be coprime with 36.
"""
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) using Euclid's algorithm."""
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    """Hill cipher keyed by a square integer matrix.

    The alphabet is ``A``-``Z`` followed by ``0``-``9`` (36 symbols), so all
    arithmetic is carried out modulo 36.
    """

    # This cipher takes alphanumerics into account, i.e. a total of 36 characters.
    key_string = string.ascii_uppercase + string.digits

    # Element-wise ``x % len(key_string)`` (numpy.vectorize objects are not
    # descriptors, so ``self.modulus(arr)`` does NOT bind ``self``).
    modulus = numpy.vectorize(lambda x: x % 36)
    # Element-wise rounding to the nearest integer.
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """Store the key matrix (reduced mod 36) after validating it.

        Raises:
            ValueError: if the key determinant is not coprime with 36.
        """
        self.encrypt_key = self.modulus(encrypt_key)  # mod-36 reduce the key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]  # block length = matrix order

    def replace_letters(self, letter: str) -> int:
        """Map an alphabet symbol to its numeric index (A=0 ... 9=35)."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a numeric index back to its alphabet symbol."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Upper-case *text*, drop foreign symbols, pad to a block multiple."""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)  # pad with the final character
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt *text* block by block and return the ciphertext."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Return the modular inverse of the key matrix (the decryption key)."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # Brute-force the modular multiplicative inverse of the determinant.
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        # adj(K) = det(K) * inv(K); scale by det^-1 to invert K modulo 36.
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt *text* block by block and return the plaintext."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    """Interactively read a key matrix and encrypt or decrypt user text."""
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
75
"""Bode-style plots (gain and phase) for single-sample audio filters."""
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    """Structural type for a filter that processes one sample at a time."""

    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n]; the protocol stub returns silence."""
        return 0.0


def get_bounds(
    fft_results: np.ndarray, samplerate: int
) -> tuple[int | float, int | float]:
    """Return (lowest, highest) dB bounds of the spectrum, at least +/-20 dB.

    Bin 0 (DC) and everything above the Nyquist bin are excluded.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's gain (dB) over frequency from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    # NOTE(review): the mangled source called ``np.logaa`` here, which does not
    # exist in NumPy; the dB conversion 20*log10(|FFT|) is the intended formula.
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's phase shift (radians) from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
75
1
# NOTE(review): whitespace-mangled excerpt of a fill-mask pipeline unit-test
# class (transformers-style test suite).  Identifiers were machine-renamed
# (`__UpperCamelCase`, `lowercase__`, `lowerCamelCase_`, `lowerCAmelCase`), so
# method bodies reference names (`unmasker`, `tokenizer`, `fill_masker`,
# `outputs`, `targets`, ...) that no longer match their assignments, and all
# test methods share the name `lowercase__` — the region is not runnable as-is
# and is preserved verbatim below for reference.
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class __UpperCamelCase ( unittest.TestCase ): lowercase : Optional[int] =MODEL_FOR_MASKED_LM_MAPPING lowercase : Any =TF_MODEL_FOR_MASKED_LM_MAPPING def lowercase__ ( self ): """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', top_k=2, framework='''tf''' ) lowerCamelCase_ =unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ {'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 38_015, '''token_str''': ''' grouped'''}, {'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 25_506, '''token_str''': ''' accuser'''}, ], ) lowerCamelCase_ =unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ { '''sequence''': '''The largest city in France is grouped''', '''score''': 2.1e-05, '''token''': 38_015, '''token_str''': ''' grouped''', }, { '''sequence''': '''The largest city in France is accuser''', '''score''': 2.1e-05, '''token''': 25_506, '''token_str''': ''' accuser''', }, ], ) lowerCamelCase_ =unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3 ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ {'''sequence''': '''My name is Clara''', '''score''': 2e-05,
'''token''': 13_606, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 3_499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 2_941, '''token_str''': ''' Te'''}, ], ) @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', top_k=2, framework='''pt''' ) lowerCamelCase_ =unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ {'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 35_676, '''token_str''': ''' Maul'''}, {'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS'''}, ], ) lowerCamelCase_ =unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ { '''sequence''': '''The largest city in France is Maul''', '''score''': 2.2e-05, '''token''': 35_676, '''token_str''': ''' Maul''', }, {'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS'''}, ], ) lowerCamelCase_ =unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3 ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ {'''sequence''': '''My name is Patrick''', '''score''': 2.1e-05, '''token''': 3_499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 2e-05, '''token''': 2_941, '''token_str''': ''' Te'''}, {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13_606, '''token_str''': ''' Clara'''}, ], ) lowerCamelCase_ =unmasker('''My name is <mask> <mask>''', top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=6 ), [ [ { '''score''': 2.2e-05, '''token''': 35_676, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is
Maul<mask></s>''', }, {'''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''}, ], [ { '''score''': 2.2e-05, '''token''': 35_676, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is<mask> Maul</s>''', }, {'''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''}, ], ], ) @require_torch_gpu def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline('''fill-mask''', model='''hf-internal-testing/tiny-random-distilbert''', device=0, framework='''pt''' ) # convert model to fp16 pipe.model.half() lowerCamelCase_ =pipe('''Paris is the [MASK] of France.''' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(lowerCAmelCase, lowerCAmelCase ) @slow @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''distilroberta-base''', top_k=2, framework='''pt''' ) self.run_large_test(lowerCAmelCase ) @slow @require_tf def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''distilroberta-base''', top_k=2, framework='''tf''' ) self.run_large_test(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase ), [ {'''sequence''': '''My name is John''', '''score''': 0.0_0_8, '''token''': 610, '''token_str''': ''' John'''}, {'''sequence''': '''My name is Chris''', '''score''': 0.0_0_7, '''token''': 1_573, '''token_str''': ''' Chris'''}, ], ) lowerCamelCase_ =unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase ), [ { '''sequence''': '''The largest city in France is Paris''', '''score''': 0.2_5_1, '''token''': 2_201,
'''token_str''': ''' Paris''', }, { '''sequence''': '''The largest city in France is Lyon''', '''score''': 0.2_1_4, '''token''': 12_790, '''token_str''': ''' Lyon''', }, ], ) lowerCamelCase_ =unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3 ) self.assertEqual( nested_simplify(lowerCAmelCase ), [ {'''sequence''': '''My name is Patrick''', '''score''': 0.0_0_5, '''token''': 3_499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Clara''', '''score''': 0.0_0_0, '''token''': 13_606, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Te''', '''score''': 0.0_0_0, '''token''': 2_941, '''token_str''': ''' Te'''}, ], ) @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', framework='''pt''' ) lowerCamelCase_ =None lowerCamelCase_ =None self.run_pipeline_test(lowerCAmelCase, [] ) @require_tf def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', framework='''tf''' ) lowerCamelCase_ =None lowerCamelCase_ =None self.run_pipeline_test(lowerCAmelCase, [] ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' ) lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) lowerCamelCase_ =[ f'''This is another {tokenizer.mask_token} test''', ] return fill_masker, examples def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =fill_masker.tokenizer lowerCamelCase_ =fill_masker.model lowerCamelCase_ =fill_masker( f'''This is a {tokenizer.mask_token}''', ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ),
'''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) lowerCamelCase_ =fill_masker([f'''This is a {tokenizer.mask_token}'''] ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) lowerCamelCase_ =fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] ) self.assertEqual( lowerCAmelCase, [ [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ], ) with self.assertRaises(lowerCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(lowerCAmelCase ): fill_masker('''This is''' ) self.run_test_top_k(lowerCAmelCase, lowerCAmelCase ) self.run_test_targets(lowerCAmelCase, lowerCAmelCase ) self.run_test_top_k_targets(lowerCAmelCase, lowerCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(lowerCAmelCase, lowerCAmelCase ) self.fill_mask_with_multiple_masks(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =tokenizer.get_vocab() lowerCamelCase_ =sorted(vocab.keys() )[:2] # Pipeline argument lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase, targets=lowerCAmelCase ) lowerCamelCase_
=fill_masker(f'''This is a {tokenizer.mask_token}''' ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) lowerCamelCase_ ={vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs}, lowerCAmelCase ) lowerCamelCase_ =[tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs}, set(lowerCAmelCase ) ) # Call argument lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCAmelCase ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) lowerCamelCase_ ={vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs}, lowerCAmelCase ) lowerCamelCase_ =[tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs}, set(lowerCAmelCase ) ) # Score equivalence lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCAmelCase ) lowerCamelCase_ =[top_mask['''token_str'''] for top_mask in outputs] lowerCamelCase_ =[top_mask['''score'''] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCAmelCase ) == set(lowerCAmelCase ): lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCAmelCase ) lowerCamelCase_ =[top_mask['''score'''] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(lowerCAmelCase ), nested_simplify(lowerCAmelCase ) ) # Raises with invalid with self.assertRaises(lowerCAmelCase ): lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(lowerCAmelCase ): lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=[''''''] ) with self.assertRaises(lowerCAmelCase ): lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets='''''' ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase, top_k=2 ) lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''' ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', top_k=2 ) self.assertEqual( lowerCAmelCase, [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ) self.assertEqual(nested_simplify(lowerCAmelCase ),
nested_simplify(lowerCAmelCase ) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =tokenizer.get_vocab() lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) # top_k=2, ntargets=3 lowerCamelCase_ =sorted(vocab.keys() )[:3] lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', top_k=2, targets=lowerCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results lowerCamelCase_ =[el['''token_str'''] for el in sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x["score"], reverse=lowerCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(lowerCAmelCase ).issubset(lowerCAmelCase ): lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', top_k=3, targets=lowerCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(lowerCAmelCase ), nested_simplify(lowerCAmelCase ) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) lowerCamelCase_ =tokenizer.get_vocab() # String duplicates + id duplicates lowerCamelCase_ =sorted(vocab.keys() )[:3] lowerCamelCase_ =[targets[0], targets[1], targets[0], targets[2], targets[1]] lowerCamelCase_ =fill_masker(f'''My name is {tokenizer.mask_token}''', targets=lowerCAmelCase, top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(lowerCAmelCase ), 3 ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase ) lowerCamelCase_ =fill_masker( f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''', top_k=2 ) self.assertEqual( lowerCAmelCase, [ [ {'''sequence''': ANY(lowerCAmelCase ),
'''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], [ {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, {'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )}, ], ], )
75
# NOTE(review): whitespace-mangled excerpt of a Funnel tokenizer unit-test
# class (transformers-style test suite).  Identifiers were machine-renamed
# (`__UpperCamelCase`, `lowercase__`, `lowerCamelCase_`, `lowerCAmelCase`,
# undefined base `lowerCamelCase__`), so method bodies reference names
# (`vocab_tokens`, `tokenizer`, `inputs`, `sentence_len`) that no longer match
# their assignments — the region is not runnable as-is and is preserved
# verbatim below for reference.
'''simple docstring''' import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =FunnelTokenizer lowercase : List[str] =FunnelTokenizerFast lowercase : Union[str, Any] =True lowercase : int =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =[ '''<unk>''', '''<cls>''', '''<sep>''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ='''UNwant\u00E9d,running''' lowerCamelCase_ ='''unwanted, running''' return input_text, output_text def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.tokenizer_class(self.vocab_file ) lowerCamelCase_ =tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(lowerCAmelCase, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [7, 4, 5, 10, 8, 9] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizers(do_lower_case=lowerCAmelCase ) for tokenizer in
tokenizers: lowerCamelCase_ =tokenizer('''UNwant\u00E9d,running''' ) lowerCamelCase_ =len(inputs['''input_ids'''] ) - 1 self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len ) lowerCamelCase_ =tokenizer('''UNwant\u00E9d,running''', '''UNwant\u00E9d,running''' ) self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len + [1] * sentence_len )
75
1
"""Simple genetic algorithm that evolves random strings toward a target."""
from __future__ import annotations

import random

# Maximum size of the population.  Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score *item*: the number of positions where it matches *main_target*."""
    score = len(
        [g for position, g in enumerate(item) if g == main_target[position]]
    )
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of *child*."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed mutated children of *parent_1*, more for higher fitness (cap 10)."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until *target* is matched exactly.

    Returns:
        (generation, total_population, best_string) once a perfect match exists.

    Raises:
        ValueError: if N_POPULATION < N_SELECTED, or *target* uses symbols
            missing from *genes* (evolution could never converge).
    """
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations, just to know it is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle.  If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small
            # strings in far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
75
"""Convert a T5X Pix2Struct checkpoint to the Hugging Face PyTorch format."""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat {tuple-key: array} dict."""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    """Map flat T5X parameter names onto HF Pix2Struct names and convert to torch tensors.

    Weight matrices are transposed (T5X stores them as in_features x out_features),
    except embeddings, which keep their layout.
    """
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": (
            "decoder.layer.0.self_attention.attention.relative_attention_bias.weight"
        ),
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Build a HF Pix2Struct model from a T5X checkpoint and save model + processor."""
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(
            hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18
        )
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(),
        text_config=decoder_config.to_dict(),
        is_vqa=is_vqa,
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # Large checkpoints were trained with more patches; flag the processor as VQA-capable.
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    # Help text previously duplicated "Use large model."; corrected to describe the flag.
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is a VQA model.")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
75
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor a_ : Any = logging.get_logger(__name__) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" warnings.warn( '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DPTImageProcessor instead.''', lowerCAmelCase, ) super().__init__(*lowerCAmelCase, **lowerCAmelCase )
75
"""CLIP-style image processor: resize -> center-crop -> rescale -> normalize."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL  # noqa: F401


class __UpperCamelCase(BaseImageProcessor):
    """Image processor that resizes, center-crops, rescales and normalizes images
    using the OpenAI CLIP statistics by default.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, size=size["shortest_edge"], default_to_square=False
        )
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline on one image or a list of images.

        Any argument left as None falls back to the value stored on the processor.
        Returns a BatchFeature holding ``pixel_values``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
75
1
"""Shared test utilities for DeepFloyd IF pipelines: tiny dummy components and
save/load round-trip checks."""
import tempfile

import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class __UpperCamelCase:
    # Mixin: concrete test classes are expected to provide `pipeline_class`,
    # `get_dummy_components` and `get_dummy_inputs`.

    def _get_dummy_components(self):
        """Tiny components for the base text-to-image IF pipeline."""
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        """Tiny components for the IF super-resolution pipeline (6-channel UNet +
        an extra image-noising scheduler)."""
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        """Round-trip save/load must preserve outputs when all optional components
        are set to None and prompts are pre-encoded."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # img2img / inpainting / superres variants carry extra image inputs.
        image = inputs["image"] if "image" in inputs else None
        mask_image = inputs["mask_image"] if "mask_image" in inputs else None
        original_image = inputs["original_image"] if "original_image" in inputs else None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        """Round-trip save/load must reproduce the pipeline output bit-for-bit
        (up to 1e-4 tolerance)."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
75
"""Break a Caesar cipher by minimizing the chi-squared statistic of letter frequencies."""
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Try every shift and return the statistically most likely decryption.

    Returns (shift, chi_squared_value, decoded_text). Characters outside the
    alphabet are passed through unchanged.
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values, keyed by shift
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    occurrences = decrypted_with_shift.count(letter)
                    expected = frequencies[letter] * occurrences
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
75
1
'''simple docstring''' from math import isqrt def a_ ( __snake_case : int ) -> bool: """simple docstring""" return all(number % divisor != 0 for divisor in range(2 , isqrt(__snake_case ) + 1 ) ) def a_ ( __snake_case : int = 10**6 ) -> int: """simple docstring""" lowerCamelCase_ =0 lowerCamelCase_ =1 lowerCamelCase_ =7 while prime_candidate < max_prime: primes_count += is_prime(__snake_case ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F"""{solution() = }""")
75
"""Utilities to download community pipeline modules and import classes from them."""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging

COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """Return all released diffusers versions from PyPI, oldest first."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """Create the cache directory for modules (with an __init__.py) and add it to sys.path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create a (possibly nested) package directory inside the modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Return the module names relatively imported by *module_file* (``import .x`` / ``from .x import``)."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Return all files transitively needed by *module_file* through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Raise ImportError if *filename* imports packages missing from the environment;
    return its relative imports otherwise."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import *module_path* from the modules cache and return *class_name* from it
    (or auto-detect the pipeline class when class_name is None)."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the unique DiffusionPipeline subclass defined outside diffusers in *loaded_module*."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Locate *module_file* (local path, GitHub community pipeline, or Hub repo),
    copy it (and its relative imports) into the dynamic-modules cache, and return
    its path relative to that cache."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        # A bare name refers to a community pipeline hosted on the diffusers GitHub repo.
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Download/cache a dynamic module and return the requested class from it."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
75
1
"""Fractional knapsack solved greedily by value/weight ratio."""
from bisect import bisect
from itertools import accumulate


def a_(vl: list, wt: list, w: int, n: int) -> float:
    """Return the maximum value attainable in a fractional knapsack.

    Items may be taken fractionally: they are sorted by value/weight ratio
    (descending) and taken greedily until capacity ``w`` is exhausted.

    Args:
        vl: item values.
        wt: item weights (assumed non-zero — a zero weight would raise
            ZeroDivisionError in the ratio sort; TODO confirm with callers).
        n: number of items; when every one of the ``n`` items fits, the sum of
            all values is returned without a fractional remainder term.
        w: knapsack capacity.

    Returns:
        The best achievable total value (0 when nothing fits).

    >>> a_([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # NOTE: the original obfuscated source declared four parameters with the
    # same name (a SyntaxError); the names above restore the intended API.
    ranked = sorted(zip(vl, wt), key=lambda pair: pair[0] / pair[1], reverse=True)
    vl, wt = [item[0] for item in ranked], [item[1] for item in ranked]
    acc = list(accumulate(wt))  # running total of weights in greedy order
    k = bisect(acc, w)  # how many whole items fit within capacity w
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])  # partial item k
        if k != n
        else sum(vl[:k])  # everything fits: no fractional remainder
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
'''simple docstring''' a_ : Any = [ 9_99, 8_00, 7_99, 6_00, 5_99, 5_00, 4_00, 3_99, 3_77, 3_55, 3_33, 3_11, 2_88, 2_66, 2_44, 2_22, 2_00, 1_99, 1_77, 1_55, 1_33, 1_11, 88, 66, 44, 22, 0, ] a_ : Any = [ 9_99, 9_76, 9_52, 9_28, 9_05, 8_82, 8_58, 8_57, 8_10, 7_62, 7_15, 7_14, 5_72, 4_29, 4_28, 2_86, 2_85, 2_38, 1_90, 1_43, 1_42, 1_18, 95, 71, 47, 24, 0, ] a_ : Optional[Any] = [ 9_99, 9_88, 9_77, 9_66, 9_55, 9_44, 9_33, 9_22, 9_11, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_50, 3_00, 2_99, 2_66, 2_33, 2_00, 1_99, 1_79, 1_59, 1_40, 1_20, 1_00, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] a_ : str = [ 9_99, 9_95, 9_92, 9_89, 9_85, 9_81, 9_78, 9_75, 9_71, 9_67, 9_64, 9_61, 9_57, 9_56, 9_51, 9_47, 9_42, 9_37, 9_33, 9_28, 9_23, 9_19, 9_14, 9_13, 9_08, 9_03, 8_97, 8_92, 8_87, 8_81, 8_76, 8_71, 8_70, 8_64, 8_58, 8_52, 8_46, 8_40, 8_34, 8_28, 8_27, 8_20, 8_13, 8_06, 7_99, 7_92, 7_85, 7_84, 7_77, 7_70, 7_63, 7_56, 7_49, 7_42, 7_41, 7_33, 7_24, 7_16, 7_07, 6_99, 6_98, 6_88, 6_77, 6_66, 6_56, 6_55, 6_45, 6_34, 6_23, 6_13, 6_12, 5_98, 5_84, 5_70, 5_69, 5_55, 5_41, 5_27, 5_26, 5_05, 4_84, 4_83, 4_62, 4_40, 4_39, 3_96, 3_95, 3_52, 3_51, 3_08, 3_07, 2_64, 2_63, 2_20, 2_19, 1_76, 1_32, 88, 44, 0, ] a_ : Optional[int] = [ 9_99, 9_97, 9_95, 9_92, 9_90, 9_88, 9_86, 9_84, 9_81, 9_79, 9_77, 9_75, 9_72, 9_70, 9_68, 9_66, 9_64, 9_61, 9_59, 9_57, 9_56, 9_54, 9_51, 9_49, 9_46, 9_44, 9_41, 9_39, 9_36, 9_34, 9_31, 9_29, 9_26, 9_24, 9_21, 9_19, 9_16, 9_14, 9_13, 9_10, 9_07, 9_05, 9_02, 8_99, 8_96, 8_93, 8_91, 8_88, 8_85, 8_82, 8_79, 8_77, 8_74, 8_71, 8_70, 8_67, 8_64, 8_61, 8_58, 8_55, 8_52, 8_49, 8_46, 8_43, 8_40, 8_37, 8_34, 8_31, 8_28, 8_27, 8_24, 8_21, 8_17, 8_14, 8_11, 8_08, 8_04, 8_01, 7_98, 7_95, 7_91, 7_88, 7_85, 7_84, 7_80, 7_77, 7_74, 7_70, 7_66, 7_63, 7_60, 7_56, 7_52, 7_49, 7_46, 7_42, 7_41, 7_37, 7_33, 7_30, 7_26, 7_22, 7_18, 7_14, 7_10, 7_07, 7_03, 6_99, 6_98, 6_94, 6_90, 6_85, 6_81, 6_77, 6_73, 6_69, 6_64, 6_60, 
6_56, 6_55, 6_50, 6_46, 6_41, 6_36, 6_32, 6_27, 6_22, 6_18, 6_13, 6_12, 6_07, 6_02, 5_96, 5_91, 5_86, 5_80, 5_75, 5_70, 5_69, 5_63, 5_57, 5_51, 5_45, 5_39, 5_33, 5_27, 5_26, 5_19, 5_12, 5_05, 4_98, 4_91, 4_84, 4_83, 4_74, 4_66, 4_57, 4_49, 4_40, 4_39, 4_28, 4_18, 4_07, 3_96, 3_95, 3_81, 3_66, 3_52, 3_51, 3_30, 3_08, 3_07, 2_86, 2_64, 2_63, 2_42, 2_20, 2_19, 1_76, 1_75, 1_32, 1_31, 88, 44, 0, ] a_ : Dict = [ 9_99, 9_91, 9_82, 9_74, 9_66, 9_58, 9_50, 9_41, 9_33, 9_25, 9_16, 9_08, 9_00, 8_99, 8_74, 8_50, 8_25, 8_00, 7_99, 7_00, 6_00, 5_00, 4_00, 3_00, 2_00, 1_00, 0, ] a_ : Tuple = [ 9_99, 9_92, 9_85, 9_78, 9_71, 9_64, 9_57, 9_49, 9_42, 9_35, 9_28, 9_21, 9_14, 9_07, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_00, 2_99, 2_00, 1_99, 1_00, 99, 0, ] a_ : Any = [ 9_99, 9_96, 9_92, 9_89, 9_85, 9_82, 9_79, 9_75, 9_72, 9_68, 9_65, 9_61, 9_58, 9_55, 9_51, 9_48, 9_44, 9_41, 9_38, 9_34, 9_31, 9_27, 9_24, 9_20, 9_17, 9_14, 9_10, 9_07, 9_03, 9_00, 8_99, 8_91, 8_84, 8_76, 8_69, 8_61, 8_53, 8_46, 8_38, 8_30, 8_23, 8_15, 8_08, 8_00, 7_99, 7_88, 7_77, 7_66, 7_55, 7_44, 7_33, 7_22, 7_11, 7_00, 6_99, 6_88, 6_77, 6_66, 6_55, 6_44, 6_33, 6_22, 6_11, 6_00, 5_99, 5_85, 5_71, 5_57, 5_42, 5_28, 5_14, 5_00, 4_99, 4_85, 4_71, 4_57, 4_42, 4_28, 4_14, 4_00, 3_99, 3_79, 3_59, 3_40, 3_20, 3_00, 2_99, 2_79, 2_59, 2_40, 2_20, 2_00, 1_99, 1_66, 1_33, 1_00, 99, 66, 33, 0, ]
75
1
"""Tests for the Trainer callback mechanism (init/add/remove and event flow)."""
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    """A callback that records, in order, every callback event it receives."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        """Build a tiny regression Trainer writing to the per-test temp dir."""
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        """Assert the two callback lists are equal up to ordering, comparing
        classes to instances where the lists mix the two forms."""
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        """Reconstruct the event sequence a full ``trainer.train()`` run should
        emit given the trainer's logging/eval/save configuration."""
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
75
"""Lazy-import module definition for the Funnel Transformer family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base structure: always-importable submodules. Optional backends are appended
# below only when their dependency is actually installed, so importing this
# package never fails on a missing optional backend.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
1
"""RoCBert model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    """Configuration for a RoCBert model.

    Stores the standard BERT-style hyper-parameters plus RoCBert's extra
    pronunciation/shape embedding settings. Defaults match the checkpoint at
    ``weiweishi/roc-bert-base-zh``.

    NOTE(review): the original obfuscated source declared every keyword
    argument with the same name (a SyntaxError); the parameter list below is
    reconstructed from the attribute assignments and default values visible in
    the body.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: auxiliary pronunciation/shape embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
75
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ ={ '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } lowerCamelCase_ ={ '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16_000, '''return_attention_mask''': False, '''do_normalize''': True, } lowerCamelCase_ =tempfile.mkdtemp() lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase_ =os.path.join(self.tmpdirname, lowerCAmelCase ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '''\n''' ) with open(self.feature_extraction_file, '''w''', 
encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '''\n''' ) # load decoder from hub lowerCamelCase_ ='''hf-internal-testing/ngram-beam-search-decoder''' def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.add_kwargs_tokens_map.copy() kwargs.update(lowerCAmelCase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowerCAmelCase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor, lowerCAmelCase ) # decoder self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, ) self.assertIsInstance(processor.decoder, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), 
decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha, 5.0 ) self.assertEqual(processor.language_model.beta, 3.0 ) self.assertEqual(processor.language_model.score_boundary, -7.0 ) self.assertEqual(processor.language_model.unk_score_offset, 3 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(lowerCAmelCase, '''include''' ): WavaVecaProcessorWithLM( tokenizer=lowerCAmelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =floats_list((3, 1_000) ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ) lowerCamelCase_ =processor(lowerCAmelCase, return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ ='''This is a test string''' lowerCamelCase_ =processor(text=lowerCAmelCase ) lowerCamelCase_ =tokenizer(lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], 
encoded_processor[key] ) def lowercase__ ( self, lowerCAmelCase=(2, 10, 16), lowerCAmelCase=77 ): """simple docstring""" np.random.seed(lowerCAmelCase ) return np.random.rand(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits(shape=(10, 16), seed=13 ) lowerCamelCase_ =processor.decode(lowerCAmelCase ) lowerCamelCase_ =decoder.decode_beams(lowerCAmelCase )[0] self.assertEqual(decoded_decoder[0], decoded_processor.text ) self.assertEqual('''</s> <s> </s>''', decoded_processor.text ) self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: lowerCamelCase_ =processor.batch_decode(lowerCAmelCase ) else: with get_context(lowerCAmelCase ).Pool() as pool: lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =list(lowerCAmelCase ) with get_context('''fork''' ).Pool() as p: lowerCamelCase_ =decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =[], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(lowerCAmelCase, decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text ) self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score ) self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =15 lowerCamelCase_ =-2_0.0 lowerCamelCase_ =-4.0 lowerCamelCase_ =processor.batch_decode( lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, ) lowerCamelCase_ =decoded_processor_out.text lowerCamelCase_ =list(lowerCAmelCase ) with get_context('''fork''' ).Pool() as pool: lowerCamelCase_ =decoder.decode_beams_batch( lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, ) lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out] lowerCamelCase_ =[d[0][2] for d in decoded_decoder_out] lowerCamelCase_ =[d[0][3] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], 
lowerCAmelCase ) self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) ) self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =2.0 lowerCamelCase_ =5.0 lowerCamelCase_ =-2_0.0 lowerCamelCase_ =True lowerCamelCase_ =processor.batch_decode( lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, ) lowerCamelCase_ =decoded_processor_out.text lowerCamelCase_ =list(lowerCAmelCase ) decoder.reset_params( alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, ) with get_context('''fork''' ).Pool() as pool: lowerCamelCase_ =decoder.decode_beams_batch( lowerCAmelCase, lowerCAmelCase, ) lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase ) lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha, 2.0 ) self.assertEqual(lm_model.beta, 5.0 ) self.assertEqual(lm_model.unk_score_offset, -2_0.0 ) self.assertEqual(lm_model.score_boundary, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ 
=processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =snapshot_download('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =os.listdir(lowerCAmelCase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =floats_list((3, 1_000) ) lowerCamelCase_ =processor_wavaveca(lowerCAmelCase, return_tensors='''np''' ) lowerCamelCase_ =processor_auto(lowerCAmelCase, return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =processor_wavaveca.batch_decode(lowerCAmelCase ) lowerCamelCase_ =processor_auto.batch_decode(lowerCAmelCase ) 
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', ) @staticmethod def lowercase__ ( lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[d[key] for d in offsets] return retrieved_list def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =self._get_dummy_logits()[0] lowerCamelCase_ =processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word 
self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowercase__ ( self ): """simple docstring""" import torch lowerCamelCase_ =load_dataset('''common_voice''', '''en''', split='''train''', streaming=lowerCAmelCase ) lowerCamelCase_ =ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) ) lowerCamelCase_ =iter(lowerCAmelCase ) lowerCamelCase_ =next(lowerCAmelCase ) lowerCamelCase_ =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) lowerCamelCase_ =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train lowerCamelCase_ =processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase ).logits.cpu().numpy() lowerCamelCase_ =processor.decode(logits[0], output_word_offsets=lowerCAmelCase ) lowerCamelCase_ =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate lowerCamelCase_ =[ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] lowerCamelCase_ ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS 
TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase ) self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text ) # output times lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) ) lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) ) # fmt: off lowerCamelCase_ =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) lowerCamelCase_ =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) ) self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
75
1
'''simple docstring''' import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def a_ ( __snake_case : Union[str, Any] ) -> int: """simple docstring""" monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() ) @pytest.fixture def a_ ( __snake_case : Any ) -> Union[str, Any]: """simple docstring""" class __UpperCamelCase : def __init__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =metric_id class __UpperCamelCase : lowercase : Dict =[MetricMock(lowerCamelCase__ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']] def lowercase__ ( self ): """simple docstring""" return self._metrics monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() ) @pytest.mark.parametrize( '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] ) def a_ ( __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : int , __snake_case : Any ) -> Optional[Any]: """simple docstring""" if "tmp_path" in args: lowerCamelCase_ =tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args ) with pytest.warns(__snake_case , match='''https://huggingface.co/docs/evaluate''' ): func(*__snake_case )
75
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : List[Any] =StableDiffusionInstructPixaPixPipeline lowercase : List[Any] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} lowercase : Optional[Any] =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase : Union[str, Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS lowercase : List[Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ =UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, ) lowerCamelCase_ =PNDMScheduler(skip_prk_steps=lowerCAmelCase ) torch.manual_seed(0 ) lowerCamelCase_ =AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], 
latent_channels=4, ) torch.manual_seed(0 ) lowerCamelCase_ =CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, ) lowerCamelCase_ =CLIPTextModel(lowerCAmelCase ) lowerCamelCase_ =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCamelCase_ ={ '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ): """simple docstring""" lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 )[0] lowerCamelCase_ =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ) if str(lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) else: lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowerCamelCase_ ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''image_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ =np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 
0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ ='''french fries''' lowerCamelCase_ =sd_pipe(**lowerCAmelCase, negative_prompt=lowerCAmelCase ) lowerCamelCase_ =output.images lowerCamelCase_ =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ =np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ =[inputs['''prompt''']] * 2 lowerCamelCase_ =np.array(inputs['''image'''] ).astype(np.floataa ) / 2_5_5.0 lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ).unsqueeze(0 ).to(lowerCAmelCase ) lowerCamelCase_ =image / 2 + 0.5 lowerCamelCase_ =image.permute(0, 3, 1, 2 ) lowerCamelCase_ =image.repeat(2, 1, 1, 1 ) lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) lowerCamelCase_ =np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] ) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='''scaled_linear''' ) lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1] lowerCamelCase_ =[round(lowerCAmelCase, 4 ) for x in image_slice.flatten().tolist()] print(''','''.join([str(lowerCAmelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) lowerCamelCase_ =np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase ) lowerCamelCase_ =VaeImageProcessor(do_resize=lowerCAmelCase, do_normalize=lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' ) )[0] lowerCamelCase_ =components['''vae'''] lowerCamelCase_ =self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): lowerCamelCase_ =vae.encode(inputs[image_param] ).latent_dist.mode() lowerCamelCase_ =pipe(**lowerCAmelCase )[0] lowerCamelCase_ 
=np.abs(out - out_latents_inputs ).max() self.assertLess(lowerCAmelCase, 1e-4, '''passing latents as image input generate different result from passing image''' ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self, lowerCAmelCase=0 ): """simple docstring""" lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' ) lowerCamelCase_ ={ '''prompt''': '''turn him into a cyborg''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''image_guidance_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase ) lowerCamelCase_ =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, 
-1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase ) lowerCamelCase_ =DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =0 def callback_fn(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) -> None: lowerCamelCase_ =True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowerCamelCase_ =latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowerCamelCase_ =latents[0, -3:, -3:, -1] lowerCamelCase_ =np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: lowerCamelCase_ =latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowerCamelCase_ =latents[0, -3:, -3:, -1] lowerCamelCase_ =np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 lowerCamelCase_ =False lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', 
safety_checker=lowerCAmelCase, torch_dtype=torch.floataa ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =self.get_inputs() pipe(**lowerCAmelCase, callback=lowerCAmelCase, callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def lowercase__ ( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCamelCase_ =self.get_inputs() lowerCamelCase_ =pipe(**lowerCAmelCase ) lowerCamelCase_ =torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 lowerCamelCase_ =inputs['''image'''].resize((504, 504) ) lowerCamelCase_ ='''timbrooks/instruct-pix2pix''' lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained( lowerCAmelCase, safety_checker=lowerCAmelCase, ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() lowerCamelCase_ =pipe(**lowerCAmelCase ) lowerCamelCase_ =output.images[0] lowerCamelCase_ =image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) lowerCamelCase_ =np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
75
1
'''simple docstring''' a_ : Any = [ 9_99, 8_00, 7_99, 6_00, 5_99, 5_00, 4_00, 3_99, 3_77, 3_55, 3_33, 3_11, 2_88, 2_66, 2_44, 2_22, 2_00, 1_99, 1_77, 1_55, 1_33, 1_11, 88, 66, 44, 22, 0, ] a_ : Any = [ 9_99, 9_76, 9_52, 9_28, 9_05, 8_82, 8_58, 8_57, 8_10, 7_62, 7_15, 7_14, 5_72, 4_29, 4_28, 2_86, 2_85, 2_38, 1_90, 1_43, 1_42, 1_18, 95, 71, 47, 24, 0, ] a_ : Optional[Any] = [ 9_99, 9_88, 9_77, 9_66, 9_55, 9_44, 9_33, 9_22, 9_11, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_50, 3_00, 2_99, 2_66, 2_33, 2_00, 1_99, 1_79, 1_59, 1_40, 1_20, 1_00, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] a_ : str = [ 9_99, 9_95, 9_92, 9_89, 9_85, 9_81, 9_78, 9_75, 9_71, 9_67, 9_64, 9_61, 9_57, 9_56, 9_51, 9_47, 9_42, 9_37, 9_33, 9_28, 9_23, 9_19, 9_14, 9_13, 9_08, 9_03, 8_97, 8_92, 8_87, 8_81, 8_76, 8_71, 8_70, 8_64, 8_58, 8_52, 8_46, 8_40, 8_34, 8_28, 8_27, 8_20, 8_13, 8_06, 7_99, 7_92, 7_85, 7_84, 7_77, 7_70, 7_63, 7_56, 7_49, 7_42, 7_41, 7_33, 7_24, 7_16, 7_07, 6_99, 6_98, 6_88, 6_77, 6_66, 6_56, 6_55, 6_45, 6_34, 6_23, 6_13, 6_12, 5_98, 5_84, 5_70, 5_69, 5_55, 5_41, 5_27, 5_26, 5_05, 4_84, 4_83, 4_62, 4_40, 4_39, 3_96, 3_95, 3_52, 3_51, 3_08, 3_07, 2_64, 2_63, 2_20, 2_19, 1_76, 1_32, 88, 44, 0, ] a_ : Optional[int] = [ 9_99, 9_97, 9_95, 9_92, 9_90, 9_88, 9_86, 9_84, 9_81, 9_79, 9_77, 9_75, 9_72, 9_70, 9_68, 9_66, 9_64, 9_61, 9_59, 9_57, 9_56, 9_54, 9_51, 9_49, 9_46, 9_44, 9_41, 9_39, 9_36, 9_34, 9_31, 9_29, 9_26, 9_24, 9_21, 9_19, 9_16, 9_14, 9_13, 9_10, 9_07, 9_05, 9_02, 8_99, 8_96, 8_93, 8_91, 8_88, 8_85, 8_82, 8_79, 8_77, 8_74, 8_71, 8_70, 8_67, 8_64, 8_61, 8_58, 8_55, 8_52, 8_49, 8_46, 8_43, 8_40, 8_37, 8_34, 8_31, 8_28, 8_27, 8_24, 8_21, 8_17, 8_14, 8_11, 8_08, 8_04, 8_01, 7_98, 7_95, 7_91, 7_88, 7_85, 7_84, 7_80, 7_77, 7_74, 7_70, 7_66, 7_63, 7_60, 7_56, 7_52, 7_49, 7_46, 7_42, 7_41, 7_37, 7_33, 7_30, 7_26, 7_22, 7_18, 7_14, 7_10, 7_07, 7_03, 6_99, 6_98, 6_94, 6_90, 6_85, 6_81, 6_77, 6_73, 6_69, 6_64, 6_60, 
6_56, 6_55, 6_50, 6_46, 6_41, 6_36, 6_32, 6_27, 6_22, 6_18, 6_13, 6_12, 6_07, 6_02, 5_96, 5_91, 5_86, 5_80, 5_75, 5_70, 5_69, 5_63, 5_57, 5_51, 5_45, 5_39, 5_33, 5_27, 5_26, 5_19, 5_12, 5_05, 4_98, 4_91, 4_84, 4_83, 4_74, 4_66, 4_57, 4_49, 4_40, 4_39, 4_28, 4_18, 4_07, 3_96, 3_95, 3_81, 3_66, 3_52, 3_51, 3_30, 3_08, 3_07, 2_86, 2_64, 2_63, 2_42, 2_20, 2_19, 1_76, 1_75, 1_32, 1_31, 88, 44, 0, ] a_ : Dict = [ 9_99, 9_91, 9_82, 9_74, 9_66, 9_58, 9_50, 9_41, 9_33, 9_25, 9_16, 9_08, 9_00, 8_99, 8_74, 8_50, 8_25, 8_00, 7_99, 7_00, 6_00, 5_00, 4_00, 3_00, 2_00, 1_00, 0, ] a_ : Tuple = [ 9_99, 9_92, 9_85, 9_78, 9_71, 9_64, 9_57, 9_49, 9_42, 9_35, 9_28, 9_21, 9_14, 9_07, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_00, 2_99, 2_00, 1_99, 1_00, 99, 0, ] a_ : Any = [ 9_99, 9_96, 9_92, 9_89, 9_85, 9_82, 9_79, 9_75, 9_72, 9_68, 9_65, 9_61, 9_58, 9_55, 9_51, 9_48, 9_44, 9_41, 9_38, 9_34, 9_31, 9_27, 9_24, 9_20, 9_17, 9_14, 9_10, 9_07, 9_03, 9_00, 8_99, 8_91, 8_84, 8_76, 8_69, 8_61, 8_53, 8_46, 8_38, 8_30, 8_23, 8_15, 8_08, 8_00, 7_99, 7_88, 7_77, 7_66, 7_55, 7_44, 7_33, 7_22, 7_11, 7_00, 6_99, 6_88, 6_77, 6_66, 6_55, 6_44, 6_33, 6_22, 6_11, 6_00, 5_99, 5_85, 5_71, 5_57, 5_42, 5_28, 5_14, 5_00, 4_99, 4_85, 4_71, 4_57, 4_42, 4_28, 4_14, 4_00, 3_99, 3_79, 3_59, 3_40, 3_20, 3_00, 2_99, 2_79, 2_59, 2_40, 2_20, 2_00, 1_99, 1_66, 1_33, 1_00, 99, 66, 33, 0, ]
75
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __UpperCamelCase : lowercase : Union[str, Any] =XGLMConfig lowercase : Optional[Any] ={} lowercase : Optional[int] ='gelu' def __init__( self, lowerCAmelCase, lowerCAmelCase=14, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=0.0_2, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_mask lowerCamelCase_ =use_labels lowerCamelCase_ =vocab_size lowerCamelCase_ =d_model lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =ffn_dim lowerCamelCase_ =activation_function lowerCamelCase_ =activation_dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =initializer_range lowerCamelCase_ =None lowerCamelCase_ =0 lowerCamelCase_ =2 lowerCamelCase_ =1 def lowercase__ ( self ): """simple docstring""" return XGLMConfig.from_pretrained('''facebook/xglm-564M''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 ) lowerCamelCase_ =None if 
self.use_input_mask: lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =self.get_config() lowerCamelCase_ =floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, input_mask, head_mask, ) def lowercase__ ( self ): """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=lowerCAmelCase, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=lowerCAmelCase, ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={ '''input_ids''': input_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : int =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowercase : Optional[Any] =(TFXGLMForCausalLM,) if is_tf_available() else () lowercase : Tuple =( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) lowercase : Optional[Any] =False lowercase : Optional[Any] =False lowercase : Optional[int] =False def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFXGLMModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, n_embd=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() @slow def lowercase__ ( self ): """simple docstring""" for model_name in 
# NOTE(review): this chunk is a whitespace-mangled dump of a Hugging Face TF-XGLM
# test module. Newlines were collapsed (many statements share one physical line),
# identifiers were machine-renamed (every method is `lowercase__`, every local is
# `lowerCamelCase_`), and the first statement below belongs to a definition that
# starts before this chunk. It is not valid Python as-is and depends entirely on
# `transformers`/TensorFlow; left byte-identical pending re-extraction of the
# original file rather than risking a lossy rewrite.
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =TFXGLMModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' ) def lowercase__ ( self ): """simple docstring""" super().test_resize_token_embeddings() @require_tf class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self, lowerCAmelCase=True ): """simple docstring""" lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ =tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off lowerCamelCase_ =[2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581] # fmt: on lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) tf.random.set_seed(0 ) lowerCamelCase_ =tokenizer('''Today is a nice day and''', return_tensors='''tf''' ) lowerCamelCase_ =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(''':/CPU:0''' ): lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, seed=[7, 0] ) lowerCamelCase_ =tokenizer.decode(output_ids[0], skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =( '''Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due''' ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) lowerCamelCase_ ='''left''' # use different length sentences to test batching lowerCamelCase_ =[ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When''', '''Hello, my dog is a little''', ] lowerCamelCase_ =tokenizer(lowerCAmelCase, return_tensors='''tf''', padding=lowerCAmelCase ) lowerCamelCase_ =inputs['''input_ids'''] lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, attention_mask=inputs['''attention_mask'''], max_new_tokens=12 ) lowerCamelCase_ =tokenizer(sentences[0], return_tensors='''tf''' ).input_ids lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 ) lowerCamelCase_ =tokenizer(sentences[1], return_tensors='''tf''' ).input_ids lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 ) lowerCamelCase_ =tokenizer.batch_decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =tokenizer.decode(output_non_padded[0], skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =tokenizer.decode(output_padded[0], skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =[ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. 
When left padding is applied, the sequence will be ''' '''a single''', '''Hello, my dog is a little bit of a shy one, but he is very friendly''', ] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [non_padded_sentence, padded_sentence] )
75
1
"""Simplified DES (S-DES): a teaching cipher operating on 8-bit messages with a 10-bit key.

The mangled original defined every function as ``a_`` with parameters named
``__snake_case`` while the bodies referenced the real names (``table``, ``inp``,
``res`` ...), so nothing was callable. This restores working, self-consistent
names and hoists the permutation/S-box tables to module level so ``function``
(which reads the global ``p4_table``) also works when imported.
"""


def apply_table(inp, table):
    """Permute bit-string *inp* according to *table* (1-indexed bit positions)."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left rotation of a bit-string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings, returned as a bit-string."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up the 4-bit string *data* in S-box *s*.

    The row is selected by the outer bits (first and last), the column by the
    two inner bits.  Returns the entry as an unpadded binary string.
    """
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One S-DES Feistel round: left half is XORed with F(right, key).

    Returns the new 8-bit string ``f(left) + right`` (no swap — callers swap
    halves between the two rounds).  Uses the module-level ``p4_table``.
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    # S-box outputs may be 1 bit; pad each to exactly 2 bits before P4.
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


# Standard S-DES tables (1-indexed permutations) and S-boxes.
p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
p4_table = [2, 4, 3, 1]
IP = [2, 6, 3, 1, 4, 8, 5, 7]
IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
expansion = [4, 1, 2, 3, 2, 3, 4, 1]
s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    # key generation: P10, split, rotate once -> K1; rotate twice more -> K2
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption: IP, round(K1), swap halves, round(K2), IP^-1
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption: same structure with the subkeys applied in reverse order
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
75
# NOTE(review): whitespace-mangled dump of the transformers zero-shot object
# detection pipeline test module. Newlines are collapsed, all methods are named
# `lowercase__` (they would shadow each other even if reformatted), and the code
# depends on `transformers`, PyTorch/TF and remote model checkpoints. Not valid
# Python as-is; left byte-identical pending re-extraction of the original file —
# a hand rewrite of the hundreds of literal expected-output dicts would be too
# error-prone to be trustworthy.
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __UpperCamelCase : @staticmethod def lowercase__ ( *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class __UpperCamelCase ( unittest.TestCase ): lowercase : int =MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =pipeline( '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCamelCase_ =[ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] return object_detector, examples def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =object_detector(examples[0], threshold=0.0 ) lowerCamelCase_ =len(lowerCAmelCase ) self.assertGreater(lowerCAmelCase, 0 ) self.assertEqual( lowerCAmelCase, [ { '''score''': ANY(lowerCAmelCase ), '''label''': ANY(lowerCAmelCase ), '''box''': {'''xmin''': ANY(lowerCAmelCase ), '''ymin''': ANY(lowerCAmelCase ), '''xmax''': ANY(lowerCAmelCase ), '''ymax''': ANY(lowerCAmelCase )}, } for i in range(lowerCAmelCase ) ], ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def lowercase__ ( self ): """simple docstring""" pass @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline( '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCamelCase_ =object_detector( 
'''./tests/fixtures/tests_samples/COCO/000000039769.png''', candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=0.6_4, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ], ) lowerCamelCase_ =object_detector( [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ], threshold=0.6_4, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': 
{'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}}, ] ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], ) lowerCamelCase_ =object_detector( [ { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', 
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, ], ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}}, ], ], ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def lowercase__ ( self ): """simple docstring""" pass @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =0.2 lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', 
candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=lowerCAmelCase, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}}, ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =2 lowerCamelCase_ =pipeline('''zero-shot-object-detection''' ) lowerCamelCase_ =object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], top_k=lowerCAmelCase, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}}, ], )
75
1
"""Close or nag stale GitHub issues on huggingface/transformers.

Restores the mangled original: the entry point was defined as ``a_`` but invoked
as ``main()``, the exemption list was assigned to ``a_`` but read as
``LABELS_TO_EXEMPT``, and the sort key was ``lambda __snake_case: i.created_at``
(an undefined ``i``). Requires a ``GITHUB_TOKEN`` environment variable and the
PyGithub package.
"""
from datetime import datetime as dt
import os

from github import Github


# Issues carrying any of these labels are never auto-closed or nagged.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main() -> None:
    """Scan open issues and close or comment on the stale ones.

    An issue is closed when the most recent comment is the stale bot's, it has
    been inactive for more than 7 days and is at least 30 days old.  Otherwise,
    after 23 days of inactivity a stale-warning comment is posted.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
75
"""Tokenizer for BlenderbotSmall (BPE with ``@@`` continuation markers).

The mangled original was unusable: every ``__init__`` parameter was named
``lowerCAmelCase`` (duplicate argument names are a SyntaxError), every method
was named ``lowercase__`` (later definitions shadow earlier ones), and the
module constants were assigned to ``a_`` while being referenced by their real
names.  This restores collision-free, self-consistent names; the logic is
otherwise unchanged.
"""
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a sequence of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Byte-pair-encoding tokenizer used by the BlenderbotSmall checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply BPE merges to *token*; sub-words are joined by "@@ " markers."""
        if token in self.cache:
            return self.cache[token]
        # Split punctuation / apostrophes off and collapse runs of whitespace.
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Greedily merge the lowest-ranked (earliest-learned) bigram.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]  # drop the trailing "</w>" marker

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split *text* into BPE sub-word tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token string to its vocabulary id (unknown -> unk id)."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map a vocabulary id back to its token string."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join sub-word tokens, removing the "@@ " continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory*; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
75
1
"""Large-prime generation via trial division by small primes plus Miller-Rabin.

Restores the mangled original in which all three functions were named ``a_``
(shadowing each other), their bodies referenced names that were not their
parameters (``num`` vs ``__snake_case``), and the calls to ``rabin_miller`` /
``is_prime_low_num`` / ``generate_large_prime`` were unresolved.  The table of
primes below 1000 is also hoisted to a module constant so it is not rebuilt on
every call.
"""
import random

# Every prime below 1000, used for fast trial division before Miller-Rabin.
_LOW_PRIMES = (
    2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
    71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
    151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
    233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
    317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
    419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499,
    503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
    607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691,
    701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
    811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907,
    911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
)


def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random rounds.

    Always returns True for a prime; returns False for a composite with
    probability at least 1 - 4**-5 per call.  Expects num > 3 and odd.
    """
    s = num - 1
    t = 0
    # Factor num - 1 as 2**t * s with s odd.
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False  # a is a witness: num is composite
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Return True iff *num* is (very probably) prime.

    Small candidates are settled by the precomputed prime table / trial
    division; anything surviving falls through to Miller-Rabin.
    """
    if num < 2:
        return False
    if num in _LOW_PRIMES:
        return True
    for prime in _LOW_PRIMES:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random probable prime with exactly *keysize* bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
75
"""EfficientFormer model configuration.

Restores the mangled original, which was not even parseable: every ``__init__``
parameter was named ``lowerCAmelCase`` (duplicate argument names are a
SyntaxError) while the body assigned the real attribute names, the base class
was the undefined ``lowerCamelCase__`` although ``PretrainedConfig`` is imported
above, and ``num_meta3d_blocks`` had been corrupted to ``num_metaad_blocks``.
Parameter order is matched to the original default-value order.
"""
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    r"""
    Configuration class for an EfficientFormer model. Instantiating it with the
    defaults yields a configuration similar to the
    `snap-research/efficientformer-l1-300` architecture. All arguments are
    stored verbatim as attributes; unknown kwargs are handled by
    [`PretrainedConfig`].
    """

    model_type = "efficientformer"

    def __init__(
        self,
        # NOTE: mutable list defaults kept to match the upstream signature.
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
75
1
'''simple docstring''' from __future__ import annotations import requests a_ : List[Any] = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def a_ ( __snake_case : str , __snake_case : int = 1 , __snake_case : str = "new" , __snake_case : list | None = None ) -> dict: """simple docstring""" lowerCamelCase_ =wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ): lowerCamelCase_ =F'''Invalid search term: {invalid_search_terms}''' raise ValueError(__snake_case ) lowerCamelCase_ =requests.get( F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={'''User-agent''': '''A random string'''} , ) if response.status_code == 429: raise requests.HTTPError lowerCamelCase_ =response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )} lowerCamelCase_ ={} for id_ in range(__snake_case ): lowerCamelCase_ ={ item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
75
# NOTE(review): whitespace-mangled dump of the transformers Speech2Text feature
# extractor test module. Newlines are collapsed, methods are uniformly renamed
# `lowercase__` (so they would shadow each other even if reformatted), and
# identifiers are corrupted (e.g. `SpeechaText` for Speech2Text, `floataa` for
# float32). The code depends on `transformers`, torch/torchaudio and the
# `datasets` hub. Not valid Python as-is; left byte-identical pending
# re-extraction of the original file rather than risking a lossy rewrite.
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor a_ : Union[str, Any] = random.Random() def a_ ( __snake_case : int , __snake_case : int=1.0 , __snake_case : Tuple=None , __snake_case : Union[str, Any]=None ) -> str: """simple docstring""" if rng is None: lowerCamelCase_ =global_rng lowerCamelCase_ =[] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __UpperCamelCase ( unittest.TestCase ): def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=400, lowerCAmelCase=2_000, lowerCAmelCase=24, lowerCAmelCase=24, lowerCAmelCase=0.0, lowerCAmelCase=16_000, lowerCAmelCase=True, lowerCAmelCase=True, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =min_seq_length lowerCamelCase_ =max_seq_length lowerCamelCase_ =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCamelCase_ =feature_size lowerCamelCase_ =num_mel_bins lowerCamelCase_ =padding_value lowerCamelCase_ =sampling_rate lowerCamelCase_ =return_attention_mask lowerCamelCase_ =do_normalize def lowercase__ ( self ): """simple docstring""" return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase__ ( self, lowerCAmelCase=False, lowerCAmelCase=False ): """simple docstring""" def _flatten(lowerCAmelCase ): return list(itertools.chain(*lowerCAmelCase ) ) if equal_length: lowerCamelCase_ 
=[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowerCamelCase_ =[ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: lowerCamelCase_ =[np.asarray(lowerCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Any =SpeechaTextFeatureExtractor if is_speech_available() else None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextFeatureExtractionTester(self ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" self.assertTrue(np.all(np.mean(lowerCAmelCase, axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase, axis=0 ) - 1 ) < 1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =[np.asarray(lowerCAmelCase ) for speech_input in speech_inputs] # Test feature size lowerCamelCase_ =feature_extractor(lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input lowerCamelCase_ =feature_extractor(speech_inputs[0], return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(np_speech_inputs[0], return_tensors='''np''' ).input_features self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) # Test batched lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features for enc_seq_a, 
enc_seq_a in zip(lowerCAmelCase, lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. lowerCamelCase_ =[floats_list((1, x) )[0] for x in (800, 800, 800)] lowerCamelCase_ =np.asarray(lowerCAmelCase ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase, lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =['''longest''', '''max_length''', '''do_not_pad'''] lowerCamelCase_ =[None, 16, None] for max_length, padding in zip(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_attention_mask=lowerCAmelCase ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =[np.sum(lowerCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =['''longest''', '''max_length''', '''do_not_pad'''] lowerCamelCase_ =[None, 16, None] for max_length, padding in zip(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =feature_extractor( lowerCAmelCase, 
max_length=lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =[np.sum(lowerCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''max_length''', max_length=4, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''longest''', max_length=4, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : 
fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 4, 24) ) lowerCamelCase_ =[floats_list((1, x) )[0] for x in range(800, 1_400, 200 )] lowerCamelCase_ =feature_extractor( lowerCAmelCase, padding='''longest''', max_length=16, truncation=lowerCAmelCase, return_tensors='''np''', return_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =inputs.input_features lowerCamelCase_ =inputs.attention_mask lowerCamelCase_ =np.sum(attention_mask == 1, axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 6, 24) ) def lowercase__ ( self ): """simple docstring""" import torch lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =np.random.rand(100, 32 ).astype(np.floataa ) lowerCamelCase_ =np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowerCamelCase_ =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) lowerCamelCase_ =feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" from datasets import load_dataset lowerCamelCase_ =load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' ) # automatic decoding with librispeech lowerCamelCase_ =ds.sort('''id''' ).select(range(lowerCAmelCase ) 
)[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =np.array([ -1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1, -1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8, -1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5, ] ) # fmt: on lowerCamelCase_ =self._load_datasamples(1 ) lowerCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''pt''' ).input_features self.assertEquals(input_features.shape, (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30], lowerCAmelCase, atol=1e-4 ) )
75
1
"""Root finding with the Newton-Raphson method over a string expression in ``x``."""
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403 -- exposes sin/cos/log/exp/... to eval() below


def a_(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Return a root of ``func`` found by the Newton-Raphson iteration.

    ``func`` is a Python expression in the single variable ``x`` (e.g.
    ``"sin(x)"``); math-module names are in scope for it.  The derivative is
    approximated with a central difference, so no symbolic package is needed.

    :param func: expression in ``x`` whose root is sought.  NOTE: evaluated
        with ``eval`` -- never pass untrusted input.
    :param a: starting guess for the iteration.
    :param precision: iteration stops once ``abs(func(x)) < precision``.
    :return: the root as a ``float``.
    :raises decimal.DivisionByZero: if the derivative vanishes at an iterate.
    """
    step = Decimal("1e-6")  # central-difference step for the derivative
    x = Decimal(a)
    while True:
        fx = Decimal(eval(func, globals(), {"x": float(x)}))  # noqa: S307
        # f'(x) ~= (f(x + h) - f(x - h)) / (2 h)
        dfx = (
            Decimal(eval(func, globals(), {"x": float(x + step)}))  # noqa: S307
            - Decimal(eval(func, globals(), {"x": float(x - step)}))  # noqa: S307
        ) / (2 * step)
        x -= fx / dfx
        # This number dictates the accuracy of the answer
        if abs(eval(func, globals(), {"x": float(x)})) < precision:  # noqa: S307
            return float(x)


# Public alias: the demo below (and external callers) use this name.
newton_raphson = a_


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
    # Find root of polynomial
    print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
    # Find Square Root of 5
    print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
    # Exponential Roots
    print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
75
'''simple docstring''' def a_ ( __snake_case : Any , __snake_case : List[str] ) -> str: """simple docstring""" lowerCamelCase_ ='''''' for i in table: res += inp[i - 1] return res def a_ ( __snake_case : List[str] ) -> Optional[int]: """simple docstring""" return data[1:] + data[0] def a_ ( __snake_case : str , __snake_case : Tuple ) -> int: """simple docstring""" lowerCamelCase_ ='''''' for i in range(len(__snake_case ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def a_ ( __snake_case : Optional[Any] , __snake_case : Tuple ) -> List[Any]: """simple docstring""" lowerCamelCase_ =int('''0b''' + data[0] + data[-1] , 2 ) lowerCamelCase_ =int('''0b''' + data[1:3] , 2 ) return bin(s[row][col] )[2:] def a_ ( __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : int , __snake_case : Tuple , __snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ =message[:4] lowerCamelCase_ =message[4:] lowerCamelCase_ =apply_table(__snake_case , __snake_case ) lowerCamelCase_ =xor(__snake_case , __snake_case ) lowerCamelCase_ =apply_sbox(__snake_case , temp[:4] ) # noqa: E741 lowerCamelCase_ =apply_sbox(__snake_case , temp[4:] ) lowerCamelCase_ ='''0''' * (2 - len(__snake_case )) + l # noqa: E741 lowerCamelCase_ ='''0''' * (2 - len(__snake_case )) + r lowerCamelCase_ =apply_table(l + r , __snake_case ) lowerCamelCase_ =xor(__snake_case , __snake_case ) return temp + right if __name__ == "__main__": a_ : Any = input("""Enter 10 bit key: """) a_ : Any = input("""Enter 8 bit message: """) a_ : str = [6, 3, 7, 4, 8, 5, 10, 9] a_ : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] a_ : str = [2, 4, 3, 1] a_ : Optional[int] = [2, 6, 3, 1, 4, 8, 5, 7] a_ : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6] a_ : Union[str, Any] = [4, 1, 2, 3, 2, 3, 4, 1] a_ : int = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] a_ : Any = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation a_ : List[Any] = apply_table(key, paa_table) a_ : 
str = temp[:5] a_ : Optional[Any] = temp[5:] a_ : Tuple = left_shift(left) a_ : Optional[Any] = left_shift(right) a_ : str = apply_table(left + right, pa_table) a_ : Optional[Any] = left_shift(left) a_ : Tuple = left_shift(right) a_ : Union[str, Any] = left_shift(left) a_ : List[str] = left_shift(right) a_ : Optional[int] = apply_table(left + right, pa_table) # encryption a_ : Optional[int] = apply_table(message, IP) a_ : List[Any] = function(expansion, sa, sa, keya, temp) a_ : str = temp[4:] + temp[:4] a_ : List[str] = function(expansion, sa, sa, keya, temp) a_ : Union[str, Any] = apply_table(temp, IP_inv) print("""Cipher text is:""", CT) # decryption a_ : Optional[int] = apply_table(CT, IP) a_ : List[Any] = function(expansion, sa, sa, keya, temp) a_ : int = temp[4:] + temp[:4] a_ : int = function(expansion, sa, sa, keya, temp) a_ : Optional[int] = apply_table(temp, IP_inv) print("""Plain text after decypting is:""", PT)
75
1
"""Time Series Transformer model configuration."""
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for a Time Series Transformer (probabilistic encoder-decoder forecaster).

    Stores the time-series-specific settings (prediction/context lengths, lag
    features, static/dynamic covariates, output distribution) plus the
    standard Transformer architecture hyper-parameters.
    """

    # Names PretrainedConfig machinery relies on.
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[str] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # context window defaults to the prediction horizon when unset
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # heuristic default embedding size per categorical feature
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        # each lag contributes input_size values, plus the extra features below
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Count of per-timestep features appended to the lagged inputs."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
75
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) a_ : List[Any] = logging.get_logger(__name__) a_ : Tuple = OrderedDict( [ ("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""), ("""beit""", """BeitFeatureExtractor"""), ("""chinese_clip""", """ChineseCLIPFeatureExtractor"""), ("""clap""", """ClapFeatureExtractor"""), ("""clip""", """CLIPFeatureExtractor"""), ("""clipseg""", """ViTFeatureExtractor"""), ("""conditional_detr""", """ConditionalDetrFeatureExtractor"""), ("""convnext""", """ConvNextFeatureExtractor"""), ("""cvt""", """ConvNextFeatureExtractor"""), ("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""), ("""data2vec-vision""", """BeitFeatureExtractor"""), ("""deformable_detr""", """DeformableDetrFeatureExtractor"""), ("""deit""", """DeiTFeatureExtractor"""), ("""detr""", """DetrFeatureExtractor"""), ("""dinat""", """ViTFeatureExtractor"""), ("""donut-swin""", """DonutFeatureExtractor"""), ("""dpt""", """DPTFeatureExtractor"""), ("""encodec""", """EncodecFeatureExtractor"""), ("""flava""", """FlavaFeatureExtractor"""), ("""glpn""", """GLPNFeatureExtractor"""), ("""groupvit""", """CLIPFeatureExtractor"""), ("""hubert""", """Wav2Vec2FeatureExtractor"""), ("""imagegpt""", """ImageGPTFeatureExtractor"""), ("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""), ("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""), ("""levit""", """LevitFeatureExtractor"""), 
("""maskformer""", """MaskFormerFeatureExtractor"""), ("""mctct""", """MCTCTFeatureExtractor"""), ("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""), ("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""), ("""mobilevit""", """MobileViTFeatureExtractor"""), ("""nat""", """ViTFeatureExtractor"""), ("""owlvit""", """OwlViTFeatureExtractor"""), ("""perceiver""", """PerceiverFeatureExtractor"""), ("""poolformer""", """PoolFormerFeatureExtractor"""), ("""regnet""", """ConvNextFeatureExtractor"""), ("""resnet""", """ConvNextFeatureExtractor"""), ("""segformer""", """SegformerFeatureExtractor"""), ("""sew""", """Wav2Vec2FeatureExtractor"""), ("""sew-d""", """Wav2Vec2FeatureExtractor"""), ("""speech_to_text""", """Speech2TextFeatureExtractor"""), ("""speecht5""", """SpeechT5FeatureExtractor"""), ("""swiftformer""", """ViTFeatureExtractor"""), ("""swin""", """ViTFeatureExtractor"""), ("""swinv2""", """ViTFeatureExtractor"""), ("""table-transformer""", """DetrFeatureExtractor"""), ("""timesformer""", """VideoMAEFeatureExtractor"""), ("""tvlt""", """TvltFeatureExtractor"""), ("""unispeech""", """Wav2Vec2FeatureExtractor"""), ("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""), ("""van""", """ConvNextFeatureExtractor"""), ("""videomae""", """VideoMAEFeatureExtractor"""), ("""vilt""", """ViltFeatureExtractor"""), ("""vit""", """ViTFeatureExtractor"""), ("""vit_mae""", """ViTFeatureExtractor"""), ("""vit_msn""", """ViTFeatureExtractor"""), ("""wav2vec2""", """Wav2Vec2FeatureExtractor"""), ("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""), ("""wavlm""", """Wav2Vec2FeatureExtractor"""), ("""whisper""", """WhisperFeatureExtractor"""), ("""xclip""", """CLIPFeatureExtractor"""), ("""yolos""", """YolosFeatureExtractor"""), ] ) a_ : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def a_ ( __snake_case : str ) -> Any: """simple docstring""" for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in 
extractors: lowerCamelCase_ =model_type_to_module_name(__snake_case ) lowerCamelCase_ =importlib.import_module(F'''.{module_name}''' , '''transformers.models''' ) try: return getattr(__snake_case , __snake_case ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(__snake_case , '''__name__''' , __snake_case ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. lowerCamelCase_ =importlib.import_module('''transformers''' ) if hasattr(__snake_case , __snake_case ): return getattr(__snake_case , __snake_case ) return None def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : Union[str, Any] , ) -> List[str]: """simple docstring""" lowerCamelCase_ =get_file_from_repo( __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , ) if resolved_config_file is None: logger.info( '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' ) return {} with open(__snake_case , encoding='''utf-8''' ) as reader: return json.load(__snake_case ) class __UpperCamelCase : def __init__( self ): """simple docstring""" raise EnvironmentError( '''AutoFeatureExtractor is designed to be instantiated ''' '''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(lowerCAmelCase ) 
def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =kwargs.pop('''config''', lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''trust_remote_code''', lowerCAmelCase ) lowerCamelCase_ =True lowerCamelCase_, lowerCamelCase_ =FeatureExtractionMixin.get_feature_extractor_dict(lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =config_dict.get('''feature_extractor_type''', lowerCAmelCase ) lowerCamelCase_ =None if "AutoFeatureExtractor" in config_dict.get('''auto_map''', {} ): lowerCamelCase_ =config_dict['''auto_map''']['''AutoFeatureExtractor'''] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase, **lowerCAmelCase ) # It could be in `config.feature_extractor_type`` lowerCamelCase_ =getattr(lowerCAmelCase, '''feature_extractor_type''', lowerCAmelCase ) if hasattr(lowerCAmelCase, '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map: lowerCamelCase_ =config.auto_map['''AutoFeatureExtractor'''] if feature_extractor_class is not None: lowerCamelCase_ =feature_extractor_class_from_name(lowerCAmelCase ) lowerCamelCase_ =feature_extractor_auto_map is not None lowerCamelCase_ =feature_extractor_class is not None or type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING lowerCamelCase_ =resolve_trust_remote_code( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) if has_remote_code and trust_remote_code: lowerCamelCase_ =get_class_from_dynamic_module( lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''code_revision''', lowerCAmelCase ) if os.path.isdir(lowerCAmelCase ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase ) elif feature_extractor_class is not None: return 
feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING: lowerCamelCase_ =FEATURE_EXTRACTOR_MAPPING[type(lowerCAmelCase )] return feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase ) raise ValueError( f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def lowercase__ ( lowerCAmelCase, lowerCAmelCase ): """simple docstring""" FEATURE_EXTRACTOR_MAPPING.register(lowerCAmelCase, lowerCAmelCase )
75
1
"""Carrier concentrations in a semiconductor via the mass-action law n*p = ni**2."""
from __future__ import annotations


def a_(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Solve for the one concentration given as 0 from the other two.

    Exactly one of the three arguments must be 0 (the unknown); the function
    returns ``(name_of_unknown, value)`` computed from ``n * p = ni**2``.

    :param electron_conc: electron concentration n (0 if unknown).
    :param hole_conc: hole concentration p (0 if unknown).
    :param intrinsic_conc: intrinsic concentration ni (0 if unknown).
    :return: tuple of the unknown quantity's name and its computed value.
    :raises ValueError: if not exactly one argument is 0, or any is negative.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
'''simple docstring''' import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) a_ : Optional[int] = logging.getLogger(__name__) def a_ ( __snake_case : Union[str, Any] , __snake_case : Union[str, Any] ) -> str: """simple docstring""" lowerCamelCase_ =np.argmax(__snake_case , axis=1 ) return np.sum(outputs == labels ) def a_ ( __snake_case : List[Any] ) -> Tuple: """simple docstring""" with open(__snake_case , encoding='''utf_8''' ) as f: lowerCamelCase_ =csv.reader(__snake_case ) lowerCamelCase_ =[] next(__snake_case ) # skip the first line for line in tqdm(__snake_case ): output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def a_ ( __snake_case : str , __snake_case : Dict , __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Dict ) -> Dict: """simple docstring""" lowerCamelCase_ =[] for dataset in encoded_datasets: lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) lowerCamelCase_ =np.zeros((n_batch, 2) , dtype=np.intaa ) lowerCamelCase_ =np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa ) lowerCamelCase_ =np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(__snake_case ): lowerCamelCase_ =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowerCamelCase_ =[start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowerCamelCase_ =with_conta lowerCamelCase_ 
=with_conta lowerCamelCase_ =len(__snake_case ) - 1 lowerCamelCase_ =len(__snake_case ) - 1 lowerCamelCase_ =with_conta lowerCamelCase_ =with_conta lowerCamelCase_ =mc_label lowerCamelCase_ =(input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(__snake_case ) for t in all_inputs ) ) return tensor_datasets def a_ ( ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=__snake_case , default='''openai-gpt''' , help='''pretrained model name''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' ) parser.add_argument( '''--output_dir''' , default=__snake_case , type=__snake_case , required=__snake_case , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument('''--train_dataset''' , type=__snake_case , default='''''' ) parser.add_argument('''--eval_dataset''' , type=__snake_case , default='''''' ) parser.add_argument('''--seed''' , type=__snake_case , default=42 ) parser.add_argument('''--num_train_epochs''' , type=__snake_case , default=3 ) parser.add_argument('''--train_batch_size''' , type=__snake_case , default=8 ) parser.add_argument('''--eval_batch_size''' , type=__snake_case , default=16 ) parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__snake_case , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , type=__snake_case , default=1 ) parser.add_argument( '''--max_steps''' , default=-1 , type=__snake_case , help=( '''If > 0: set total number of training steps to perform. 
Override num_train_epochs.''' ) , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=__snake_case , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--learning_rate''' , type=__snake_case , default=6.25e-5 ) parser.add_argument('''--warmup_steps''' , default=0 , type=__snake_case , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--lr_schedule''' , type=__snake_case , default='''warmup_linear''' ) parser.add_argument('''--weight_decay''' , type=__snake_case , default=0.0_1 ) parser.add_argument('''--lm_coef''' , type=__snake_case , default=0.9 ) parser.add_argument('''--n_valid''' , type=__snake_case , default=374 ) parser.add_argument('''--server_ip''' , type=__snake_case , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=__snake_case , default='''''' , help='''Can be used for distant debugging.''' ) lowerCamelCase_ =parser.parse_args() print(__snake_case ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__snake_case ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) lowerCamelCase_ =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) lowerCamelCase_ =torch.cuda.device_count() logger.info('''device: {}, n_gpu {}'''.format(__snake_case , __snake_case ) ) if not args.do_train and not args.do_eval: raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special 
tokens` # These new embeddings will be fine-tuned on the RocStories dataset lowerCamelCase_ =['''_start_''', '''_delimiter_''', '''_classify_'''] lowerCamelCase_ =OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(__snake_case ) lowerCamelCase_ =tokenizer.convert_tokens_to_ids(__snake_case ) lowerCamelCase_ =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(__snake_case ) ) model.to(__snake_case ) # Load and encode the datasets def tokenize_and_encode(__snake_case : Union[str, Any] ): if isinstance(__snake_case , __snake_case ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__snake_case ) ) elif isinstance(__snake_case , __snake_case ): return obj return [tokenize_and_encode(__snake_case ) for o in obj] logger.info('''Encoding dataset...''' ) lowerCamelCase_ =load_rocstories_dataset(args.train_dataset ) lowerCamelCase_ =load_rocstories_dataset(args.eval_dataset ) lowerCamelCase_ =(train_dataset, eval_dataset) lowerCamelCase_ =tokenize_and_encode(__snake_case ) # Compute the max input length for the Transformer lowerCamelCase_ =model.config.n_positions // 2 - 2 lowerCamelCase_ =max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) lowerCamelCase_ =min(__snake_case , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders lowerCamelCase_ =pre_process_datasets(__snake_case , __snake_case , __snake_case , *__snake_case ) lowerCamelCase_, lowerCamelCase_ =tensor_datasets[0], tensor_datasets[1] lowerCamelCase_ =TensorDataset(*__snake_case ) lowerCamelCase_ =RandomSampler(__snake_case ) lowerCamelCase_ =DataLoader(__snake_case , sampler=__snake_case , batch_size=args.train_batch_size ) lowerCamelCase_ =TensorDataset(*__snake_case ) lowerCamelCase_ =SequentialSampler(__snake_case ) lowerCamelCase_ =DataLoader(__snake_case , 
sampler=__snake_case , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: lowerCamelCase_ =args.max_steps lowerCamelCase_ =args.max_steps // (len(__snake_case ) // args.gradient_accumulation_steps) + 1 else: lowerCamelCase_ =len(__snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs lowerCamelCase_ =list(model.named_parameters() ) lowerCamelCase_ =['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight'''] lowerCamelCase_ =[ { '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], '''weight_decay''': args.weight_decay, }, {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0}, ] lowerCamelCase_ =AdamW(__snake_case , lr=args.learning_rate , eps=args.adam_epsilon ) lowerCamelCase_ =get_linear_schedule_with_warmup( __snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=__snake_case ) if args.do_train: lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ): lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =tqdm(__snake_case , desc='''Training''' ) for step, batch in enumerate(__snake_case ): lowerCamelCase_ =tuple(t.to(__snake_case ) for t in batch ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =batch lowerCamelCase_ =model(__snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case ) lowerCamelCase_ =args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() lowerCamelCase_ =( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 lowerCamelCase_ ='''Training loss: {:.2e} lr: {:.2e}'''.format(__snake_case , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer lowerCamelCase_ 
=model.module if hasattr(__snake_case , '''module''' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` lowerCamelCase_ =os.path.join(args.output_dir , __snake_case ) lowerCamelCase_ =os.path.join(args.output_dir , __snake_case ) torch.save(model_to_save.state_dict() , __snake_case ) model_to_save.config.to_json_file(__snake_case ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned lowerCamelCase_ =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) lowerCamelCase_ =OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(__snake_case ) if args.do_eval: model.eval() lowerCamelCase_, lowerCamelCase_ =0, 0 lowerCamelCase_, lowerCamelCase_ =0, 0 for batch in tqdm(__snake_case , desc='''Evaluating''' ): lowerCamelCase_ =tuple(t.to(__snake_case ) for t in batch ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =batch with torch.no_grad(): lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =model( __snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case ) lowerCamelCase_ =mc_logits.detach().cpu().numpy() lowerCamelCase_ =mc_labels.to('''cpu''' ).numpy() lowerCamelCase_ =accuracy(__snake_case , __snake_case ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 lowerCamelCase_ =eval_loss / nb_eval_steps lowerCamelCase_ =eval_accuracy / nb_eval_examples lowerCamelCase_ =tr_loss / nb_tr_steps if args.do_train else None lowerCamelCase_ ={'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss} lowerCamelCase_ =os.path.join(args.output_dir , '''eval_results.txt''' ) with open(__snake_case , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' , __snake_case , str(result[key] ) ) 
writer.write('''%s = %s\n''' % (key, str(result[key] )) ) if __name__ == "__main__": main()
75
1
'''simple docstring'''
# Image processor for a LayoutLM-style document-understanding model: resizes page
# images, optionally runs Tesseract OCR to produce words plus 0-1000-normalized
# bounding boxes, and flips channels RGB -> BGR for a Detectron2-based backbone.
#
# NOTE(review): this file appears machine-obfuscated — assignment targets were
# rewritten to `lowerCamelCase_` and parameters to `__snake_case`/`lowerCAmelCase`
# (duplicated names are a SyntaxError) while the bodies still read the original
# names (`box`, `width`, `words`, `images`, ...). As written it cannot run; the
# comments below describe the intended behavior of the un-obfuscated code.
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends

if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

a_ : Dict = logging.get_logger(__name__)


def a_ ( __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Dict ) -> Any:
    # Scale a pixel-space box (x0, y0, x1, y1) into the integer 0-1000 range
    # expected by LayoutLM-family models.
    # Intended parameters (read in the body): box, width, height.
    """simple docstring"""
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]


def a_ ( __snake_case : np.ndarray , __snake_case : Optional[str] , __snake_case : Optional[str] = None ) -> Dict:
    # Run Tesseract OCR on one image and return (words, normalized_boxes).
    # Intended parameters: image, lang (OCR language), tesseract_config.
    """simple docstring"""
    lowerCamelCase_ =tesseract_config if tesseract_config is not None else ''''''
    # apply OCR
    lowerCamelCase_ =to_pil_image(__snake_case )
    lowerCamelCase_, lowerCamelCase_ =pil_image.size
    lowerCamelCase_ =pytesseract.image_to_data(__snake_case , lang=__snake_case , output_type='''dict''' , config=__snake_case )
    lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    lowerCamelCase_ =[idx for idx, word in enumerate(__snake_case ) if not word.strip()]
    lowerCamelCase_ =[word for idx, word in enumerate(__snake_case ) if idx not in irrelevant_indices]
    # Drop the left/top/width/height entries of every filtered-out word so the
    # four coordinate lists stay aligned with `words`.
    lowerCamelCase_ =[coord for idx, coord in enumerate(__snake_case ) if idx not in irrelevant_indices]
    lowerCamelCase_ =[coord for idx, coord in enumerate(__snake_case ) if idx not in irrelevant_indices]
    lowerCamelCase_ =[coord for idx, coord in enumerate(__snake_case ) if idx not in irrelevant_indices]
    lowerCamelCase_ =[coord for idx, coord in enumerate(__snake_case ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    lowerCamelCase_ =[]
    for x, y, w, h in zip(__snake_case , __snake_case , __snake_case , __snake_case ):
        lowerCamelCase_ =[x, y, x + w, y + h]
        actual_boxes.append(__snake_case )
    # finally, normalize the bounding boxes
    lowerCamelCase_ =[]
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(__snake_case , __snake_case , __snake_case ) )
    # NOTE(review): `assert` is stripped under `python -O`; an explicit raise
    # would be safer for this invariant.
    assert len(__snake_case ) == len(__snake_case ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes


class __UpperCamelCase ( lowerCamelCase__ ):
    # Image processor (base class obfuscated; presumably BaseImageProcessor,
    # imported above — TODO confirm against the un-obfuscated upstream source).
    # Produces `pixel_values` plus optional OCR `words`/`boxes`.
    lowercase : int =['pixel_values']

    def __init__( self, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = PILImageResampling.BILINEAR, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = "", **lowerCAmelCase, ):
        # Intended parameters (read in the body): do_resize, size (defaults to
        # 224x224), resample, apply_ocr, ocr_lang, tesseract_config.
        """simple docstring"""
        super().__init__(**lowerCAmelCase )
        lowerCamelCase_ =size if size is not None else {'''height''': 224, '''width''': 224}
        lowerCamelCase_ =get_size_dict(lowerCAmelCase )
        lowerCamelCase_ =do_resize
        lowerCamelCase_ =size
        lowerCamelCase_ =resample
        lowerCamelCase_ =apply_ocr
        lowerCamelCase_ =ocr_lang
        lowerCamelCase_ =tesseract_config

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = PILImageResampling.BILINEAR, lowerCAmelCase = None, **lowerCAmelCase, ):
        # resize: scale one image to size["height"] x size["width"].
        """simple docstring"""
        lowerCamelCase_ =get_size_dict(lowerCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        lowerCamelCase_ =(size['''height'''], size['''width'''])
        return resize(lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = ChannelDimension.FIRST, **lowerCAmelCase, ):
        # preprocess: full pipeline — validate, optionally OCR, resize, flip
        # RGB->BGR, reformat channel dimension, and wrap in a BatchFeature.
        # Per-call arguments fall back to the values stored at __init__ time.
        """simple docstring"""
        lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize
        lowerCamelCase_ =size if size is not None else self.size
        lowerCamelCase_ =get_size_dict(lowerCAmelCase )
        lowerCamelCase_ =resample if resample is not None else self.resample
        lowerCamelCase_ =apply_ocr if apply_ocr is not None else self.apply_ocr
        lowerCamelCase_ =ocr_lang if ocr_lang is not None else self.ocr_lang
        lowerCamelCase_ =tesseract_config if tesseract_config is not None else self.tesseract_config
        lowerCamelCase_ =make_list_of_images(lowerCAmelCase )
        if not valid_images(lowerCAmelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        # All transformations expect numpy arrays.
        lowerCamelCase_ =[to_numpy_array(lowerCAmelCase ) for image in images]
        if apply_ocr:
            requires_backends(self, '''pytesseract''' )
            lowerCamelCase_ =[]
            lowerCamelCase_ =[]
            for image in images:
                lowerCamelCase_, lowerCamelCase_ =apply_tesseract(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
                words_batch.append(lowerCAmelCase )
                boxes_batch.append(lowerCAmelCase )
        if do_resize:
            lowerCamelCase_ =[self.resize(image=lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        lowerCamelCase_ =[flip_channel_order(lowerCAmelCase ) for image in images]
        lowerCamelCase_ =[to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]
        lowerCamelCase_ =BatchFeature(data={'''pixel_values''': images}, tensor_type=lowerCAmelCase )
        if apply_ocr:
            # Attach the OCR outputs alongside pixel_values.
            lowerCamelCase_ =words_batch
            lowerCamelCase_ =boxes_batch
        return data
75
'''simple docstring'''
# Constant-stretch / histogram-equalization demo built on OpenCV (imported here
# under the obfuscated alias ``cva``), numpy and matplotlib.
#
# NOTE(review): this file appears machine-obfuscated — `self.xxx = ...` targets
# were rewritten to `lowerCamelCase_ =` while the bodies still read the original
# attributes (`self.img`, `self.sk`, ...), and the class name `ConstantStretch`
# used in `__main__` is not bound (the class is `__UpperCamelCase`). As written
# it cannot run; comments describe the intended behavior.
import copy
import os

import cva
import numpy as np
from matplotlib import pyplot as plt


class __UpperCamelCase :
    def __init__( self ):
        # Intended attributes (read later): img, original_image, last_list
        # (equalization lookup table), rem, L (=256 gray levels), sk (running
        # CDF), k (pixel count), number_of_rows, number_of_cols.
        """simple docstring"""
        lowerCamelCase_ =''''''
        lowerCamelCase_ =''''''
        lowerCamelCase_ =[]
        lowerCamelCase_ =0
        lowerCamelCase_ =256
        lowerCamelCase_ =0
        lowerCamelCase_ =0
        lowerCamelCase_ =0
        lowerCamelCase_ =0

    def lowercase__ ( self, lowerCAmelCase ):
        # stretch: load a grayscale image, build the equalization lookup table
        # from its histogram, remap every pixel and write the result to disk.
        """simple docstring"""
        lowerCamelCase_ =cva.imread(lowerCAmelCase, 0 )
        lowerCamelCase_ =copy.deepcopy(self.img )
        # plt.hist returns (counts, bin_edges, patches); counts feed the CDF.
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =plt.hist(self.img.ravel(), 256, [0, 256], label='''x''' )
        lowerCamelCase_ =np.sum(lowerCAmelCase )
        for i in range(len(lowerCAmelCase ) ):
            lowerCamelCase_ =x[i] / self.k
            self.sk += prk
            lowerCamelCase_ =(self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): `last % last` is always 0 for nonzero `last`
                # (and raises ZeroDivisionError at 0) — almost certainly a bug;
                # presumably `last % 1` (fractional part) was intended. TODO
                # confirm against the un-obfuscated upstream source.
                lowerCamelCase_ =int(last % last )
            # Round half-up using the remainder computed above.
            lowerCamelCase_ =int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(lowerCAmelCase )
            # NOTE(review): these two are loop-invariant and could be hoisted.
            lowerCamelCase_ =int(np.ma.count(self.img ) / self.img[1].size )
            lowerCamelCase_ =self.img[1].size
        # Remap each pixel through the lookup table built above.
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                lowerCamelCase_ =self.img[j][i]
                if num != self.last_list[num]:
                    lowerCamelCase_ =self.last_list[num]
        cva.imwrite('''output_data/output.jpg''', self.img )

    def lowercase__ ( self ):
        # plot_histogram: show the histogram of the (equalized) image.
        """simple docstring"""
        plt.hist(self.img.ravel(), 256, [0, 256] )

    def lowercase__ ( self ):
        # show_image: display before/after side by side for 5 seconds.
        """simple docstring"""
        cva.imshow('''Output-Image''', self.img )
        cva.imshow('''Input-Image''', self.original_image )
        cva.waitKey(5_000 )
        cva.destroyAllWindows()


if __name__ == "__main__":
    # NOTE(review): `os.path.basename(__file__)` yields just the script's file
    # name; `os.path.dirname(__file__)` was presumably intended here — TODO
    # confirm.
    a_ : str = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
    a_ : Optional[Any] = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
75
1
'''simple docstring'''
# SageMaker-specific TrainingArguments: detects SageMaker model parallelism
# (smdistributed) from environment variables and configures devices for model
# parallel, data parallel (smddp), single-GPU, or plain torch.distributed runs.
#
# NOTE(review): machine-obfuscated file — assignment targets were rewritten to
# `lowerCamelCase_` while bodies still read the original names (`smp_options`,
# `device`, ...), and the dataclass base `lowerCamelCase__` is unbound
# (presumably TrainingArguments, imported above — TODO confirm). As written it
# cannot run; comments describe the intended behavior.
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging

a_ : Optional[Any] = logging.get_logger(__name__)


def a_ ( ) -> Union[str, Any]:
    # is_sagemaker_model_parallel_available: True only when the SageMaker
    # launcher passed model-parallel options AND smdistributed is installed.
    """simple docstring"""
    # Get the sagemaker specific mp parameters from smp_options variable.
    lowerCamelCase_ =os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        lowerCamelCase_ =json.loads(__snake_case )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    lowerCamelCase_ =os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        lowerCamelCase_ =json.loads(__snake_case )
        if not mpi_options.get('''sagemaker_mpi_enabled''' , __snake_case ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('''smdistributed''' ) is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
    # mp_parameters: raw model-parallel args forwarded by the SageMaker launcher.
    lowercase : str =field(
        default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )

    def lowercase__ ( self ):
        # __post_init__: warn that this class is deprecated in favor of
        # TrainingArguments.
        """simple docstring"""
        super().__post_init__()
        warnings.warn(
            '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
            '''`TrainingArguments` instead.''', lowerCAmelCase, )

    @cached_property
    def lowercase__ ( self ):
        # _setup_devices: pick the torch device and gpu count for this process,
        # initializing the right distributed backend when needed. Cached — runs
        # once per instance.
        """simple docstring"""
        logger.info('''PyTorch: setting up devices''' )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                '''torch.distributed process group is initialized, but local_rank == -1. '''
                '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' )
        if self.no_cuda:
            lowerCamelCase_ =torch.device('''cpu''' )
            lowerCamelCase_ =0
        elif is_sagemaker_model_parallel_available():
            # Model parallel: smp assigns the local rank.
            lowerCamelCase_ =smp.local_rank()
            lowerCamelCase_ =torch.device('''cuda''', lowerCAmelCase )
            lowerCamelCase_ =1
        elif is_sagemaker_dp_enabled():
            # SageMaker data parallel: use the smddp backend.
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend='''smddp''', timeout=self.ddp_timeout_delta )
            lowerCamelCase_ =int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) )
            lowerCamelCase_ =torch.device('''cuda''', self.local_rank )
            lowerCamelCase_ =1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            lowerCamelCase_ =torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            lowerCamelCase_ =torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='''nccl''', timeout=self.ddp_timeout_delta )
            lowerCamelCase_ =torch.device('''cuda''', self.local_rank )
            lowerCamelCase_ =1
        if device.type == "cuda":
            torch.cuda.set_device(lowerCAmelCase )
        return device

    @property
    def lowercase__ ( self ):
        # world_size: under model parallelism the effective world size is the
        # data-parallel degree reported by smp.
        """simple docstring"""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def lowercase__ ( self ):
        # place_model_on_device: smp places the model itself.
        """simple docstring"""
        return not is_sagemaker_model_parallel_available()

    @property
    def lowercase__ ( self ):
        # _no_sync_in_gradient_accumulation-style flag: always False here.
        """simple docstring"""
        return False
75
'''simple docstring'''
# Zero-shot audio classification pipeline: scores an audio clip against
# free-form candidate labels via an audio-text model (CLAP-style
# `logits_per_audio`), PyTorch only.
#
# NOTE(review): machine-obfuscated file — assignment targets were rewritten to
# `lowerCamelCase_` and argument slots to `lowerCAmelCase` while bodies still
# read original names (`audio`, `kwargs`, `inputs`, ...); the decorator/base
# `lowerCamelCase__` is unbound (presumably PIPELINE_INIT_ARGS / Pipeline,
# imported below — TODO confirm). As written it cannot run; comments describe
# the intended behavior.
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline

a_ : Any = logging.get_logger(__name__)


@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
    def __init__( self, **lowerCAmelCase ):
        """simple docstring"""
        super().__init__(**lowerCAmelCase )
        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        # No specific FOR_XXX available yet

    def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
        # Entry point: accepts a URL, file path, raw bytes, or 1-D np.ndarray.
        """simple docstring"""
        return super().__call__(lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, **lowerCAmelCase ):
        # _sanitize_parameters: route candidate_labels / hypothesis_template
        # kwargs to the preprocess step.
        """simple docstring"""
        lowerCamelCase_ ={}
        if "candidate_labels" in kwargs:
            lowerCamelCase_ =kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            lowerCamelCase_ =kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase="This is a sound of {}." ):
        # preprocess: fetch/decode the audio to a mono waveform, extract audio
        # features, and tokenize one hypothesis sentence per candidate label.
        """simple docstring"""
        if isinstance(lowerCAmelCase, lowerCAmelCase ):
            if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                lowerCamelCase_ =requests.get(lowerCAmelCase ).content
            else:
                with open(lowerCAmelCase, '''rb''' ) as f:
                    lowerCamelCase_ =f.read()
        if isinstance(lowerCAmelCase, lowerCAmelCase ):
            # Raw bytes -> waveform at the feature extractor's sampling rate.
            lowerCamelCase_ =ffmpeg_read(lowerCAmelCase, self.feature_extractor.sampling_rate )
        if not isinstance(lowerCAmelCase, np.ndarray ):
            raise ValueError('''We expect a numpy ndarray as input''' )
        if len(audio.shape ) != 1:
            raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
        lowerCamelCase_ =self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='''pt''' )
        lowerCamelCase_ =candidate_labels
        lowerCamelCase_ =[hypothesis_template.format(lowerCAmelCase ) for x in candidate_labels]
        lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework, padding=lowerCAmelCase )
        lowerCamelCase_ =[text_inputs]
        return inputs

    def lowercase__ ( self, lowerCAmelCase ):
        # _forward: run the audio-text model and keep per-label logits.
        """simple docstring"""
        lowerCamelCase_ =model_inputs.pop('''candidate_labels''' )
        lowerCamelCase_ =model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0], lowerCAmelCase ):
            lowerCamelCase_ =text_inputs[0]
        else:
            # Batching case.
            lowerCamelCase_ =text_inputs[0][0]
        lowerCamelCase_ =self.model(**lowerCAmelCase, **lowerCAmelCase )
        lowerCamelCase_ ={
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_audio,
        }
        return model_outputs

    def lowercase__ ( self, lowerCAmelCase ):
        # postprocess: softmax the logits and return [{"score", "label"}, ...]
        # sorted by descending score.
        """simple docstring"""
        lowerCamelCase_ =model_outputs.pop('''candidate_labels''' )
        lowerCamelCase_ =model_outputs['''logits'''][0]
        if self.framework == "pt":
            lowerCamelCase_ =logits.softmax(dim=0 )
            lowerCamelCase_ =probs.tolist()
        else:
            raise ValueError('''`tf` framework not supported.''' )
        lowerCamelCase_ =[
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(lowerCAmelCase, lowerCAmelCase ), key=lambda lowerCAmelCase : -x[0] )
        ]
        return result
75
1
'''simple docstring'''
# Filesystem helpers: registers the datasets compression filesystems and the
# Hub filesystem with fsspec, and provides small utilities for working with
# remote dataset paths.
#
# Fix: the obfuscator had rebound the names below to throwaway `a_` variables
# while later lines read `_has_safs` / `COMPRESSION_FILESYSTEMS`, guaranteeing
# a NameError at import time; the bindings are restored to the names actually
# read. Function/parameter names are restored from the names read inside each
# body (and from the `is_remote_filesystem` call site in `rename`).
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem

# Whether the optional s3fs backend is installed (read by the guard below).
_has_safs = importlib.util.find_spec("""s3fs""") is not None

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

# Read by the registration loop below.
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. ``s3://``) from *dataset_path*, if any.

    Returns the path unchanged when it carries no ``://`` protocol marker.
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return ``True`` when *fs* is a non-local (remote) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """Move *src* to *dst* on filesystem *fs*.

    Local moves go through :func:`shutil.move`; remote moves use ``fs.mv``
    with ``recursive=True`` so directories are moved as a whole.
    """
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Reset fsspec's asyncio state so it can be used again after a fork.

    NOTE(review): the three fallback assignments were destroyed by the
    obfuscation (`None`, `None`, `threading.Lock()` assigned to a throwaway
    name); they are reconstructed per upstream ``datasets.filesystems`` —
    confirm against the un-obfuscated source.
    """
    if hasattr(fsspec.asyn, '''reset_lock'''):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
75
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : str = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp a_ : Optional[Any] = 5 a_ : str = 10 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : int =SpeechaTextTokenizer lowercase : int =False lowercase : List[str] =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =sp.SentencePieceProcessor() spm_model.Load(lowerCAmelCase ) lowerCamelCase_ =['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCAmelCase ) )] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ =Path(self.tmpdirname ) save_json(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''<pad>''' lowerCamelCase_ =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ), lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ), lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =list(self.get_tokenizer().get_vocab().keys() ) 
self.assertEqual(vocab_keys[0], '''<s>''' ) self.assertEqual(vocab_keys[1], '''<pad>''' ) self.assertEqual(vocab_keys[-1], '''j''' ) self.assertEqual(len(lowerCAmelCase ), 1_001 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size, 1_001 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCamelCase_ =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [289, 50, 14, 174, 386], ) lowerCamelCase_ =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], ) lowerCamelCase_ =tokenizer.convert_tokens_to_ids(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCamelCase_ =tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 
142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase, model_name='''facebook/s2t-small-mustc-en-de-st''', revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''', ) @require_sentencepiece class __UpperCamelCase ( 
unittest.TestCase ): lowercase : Tuple ='valhalla/s2t_mustc_multilinguial_medium' lowercase : Dict ='C\'est trop cool' lowercase : str ='Esto es genial' @classmethod def lowercase__ ( cls ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 11 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size, 10_000 ) def lowercase__ ( self ): """simple docstring""" self.assertIn(lowerCAmelCase, self.tokenizer.all_special_ids ) lowerCamelCase_ =[ES_CODE, 4, 1_601, 47, 7_647, 2] lowerCamelCase_ =self.tokenizer.decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' lowerCamelCase_ =self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0], lowerCAmelCase ) self.assertEqual(encoded[-1], self.tokenizer.eos_token_id ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE] ) lowerCamelCase_ ='''es''' self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE] )
75
1
'''simple docstring'''
# Lazy import structure for the TrOCR model package: type checkers see the real
# imports, while at runtime the module is replaced by a _LazyModule that imports
# submodules on first attribute access.
#
# Fix: the obfuscator had rebound `_import_structure` (read by `_LazyModule`)
# and the `_LazyModule` instance itself to throwaway `a_` names, breaking both
# the torch-conditional registration and the sys.modules replacement that the
# lazy-module pattern requires.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Always-available submodules.
_import_structure = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only registered when torch is installed.
    _import_structure["""modeling_trocr"""] = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers the
    # deferred imports declared in _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
75
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def a_ ( ) -> Dict: """simple docstring""" lowerCamelCase_ ='''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg''' lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' ) return image def a_ ( __snake_case : Tuple ) -> Dict: """simple docstring""" lowerCamelCase_ =[] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): 
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') ) # fmt: on return rename_keys def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ =dct.pop(__snake_case ) lowerCamelCase_ =val def a_ ( __snake_case : str , __snake_case : Optional[Any] ) -> Any: """simple docstring""" for i in 
range(config.vision_config.num_hidden_layers ): # read in original q and v biases lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict lowerCamelCase_ =torch.cat((q_bias, torch.zeros_like(__snake_case , requires_grad=__snake_case ), v_bias) ) lowerCamelCase_ =qkv_bias def a_ ( __snake_case : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ =364 if '''coco''' in model_name else 224 lowerCamelCase_ =InstructBlipVisionConfig(image_size=__snake_case ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2001 ).to_dict() elif "vicuna-13b" in model_name: lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2001 ).to_dict() else: raise ValueError('''Model name not supported''' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 lowerCamelCase_ =InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict() lowerCamelCase_ =InstructBlipConfig(vision_config=__snake_case , text_config=__snake_case , qformer_config=__snake_case ) return config, image_size @torch.no_grad() def a_ ( __snake_case : Optional[int] , __snake_case : str=None , __snake_case : List[Any]=False ) -> List[str]: """simple docstring""" lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' ) 
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} ) if "t5" in model_name: lowerCamelCase_ =TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) lowerCamelCase_ =LlamaTokenizerFast.from_pretrained( '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' ) tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} ) lowerCamelCase_, lowerCamelCase_ =get_blipa_config(__snake_case ) lowerCamelCase_ =InstructBlipForConditionalGeneration(__snake_case ).eval() lowerCamelCase_ ={ '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''), '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''), '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''), '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''), } lowerCamelCase_, lowerCamelCase_ =model_name_to_original[model_name] # load original model print('''Loading original model...''' ) lowerCamelCase_ ='''cuda:1''' if torch.cuda.is_available() else '''cpu''' lowerCamelCase_ ='''cuda:2''' if torch.cuda.is_available() else '''cpu''' lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =load_model_and_preprocess( name=__snake_case , model_type=__snake_case , is_eval=__snake_case , device=__snake_case ) original_model.eval() print('''Done!''' ) # update state dict keys lowerCamelCase_ =original_model.state_dict() lowerCamelCase_ =create_rename_keys(__snake_case ) for src, dest in rename_keys: rename_key(__snake_case , __snake_case , __snake_case ) # some 
keys can be renamed efficiently for key, val in state_dict.copy().items(): lowerCamelCase_ =state_dict.pop(__snake_case ) if key.startswith('''Qformer.bert''' ): lowerCamelCase_ =key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: lowerCamelCase_ =key.replace('''self''' , '''attention''' ) if "llm_proj" in key: lowerCamelCase_ =key.replace('''llm_proj''' , '''language_projection''' ) if "t5_proj" in key: lowerCamelCase_ =key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''llm_model''' ): lowerCamelCase_ =key.replace('''llm_model''' , '''language_model''' ) if key.startswith('''t5''' ): lowerCamelCase_ =key.replace('''t5''' , '''language''' ) lowerCamelCase_ =val # read in qv biases read_in_q_v_bias(__snake_case , __snake_case ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__snake_case , strict=__snake_case ) lowerCamelCase_ =load_demo_image() lowerCamelCase_ ='''What is unusual about this image?''' # create processor lowerCamelCase_ =BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=__snake_case , image_std=__snake_case ) lowerCamelCase_ =InstructBlipProcessor( image_processor=__snake_case , tokenizer=__snake_case , qformer_tokenizer=__snake_case , ) lowerCamelCase_ =processor(images=__snake_case , text=__snake_case , return_tensors='''pt''' ).to(__snake_case ) # make sure processor creates exact same pixel values lowerCamelCase_ =vis_processors['''eval'''](__snake_case ).unsqueeze(0 ).to(__snake_case ) lowerCamelCase_ =inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __snake_case ) original_model.to(__snake_case ) hf_model.to(__snake_case ) with torch.no_grad(): if "vicuna" in model_name: lowerCamelCase_ =original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits lowerCamelCase_ =hf_model(**__snake_case ).logits else: lowerCamelCase_ =original_model( {'''image''': 
original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits lowerCamelCase_ =tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(__snake_case ) lowerCamelCase_ =label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) lowerCamelCase_ =hf_model(**__snake_case , labels=__snake_case ).logits print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape lowerCamelCase_ =1e-4 if '''vicuna''' in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __snake_case , atol=__snake_case ) print('''Looks ok!''' ) print('''Generating with original model...''' ) lowerCamelCase_ =original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('''Generating with HF model...''' ) lowerCamelCase_ =hf_model.generate( **__snake_case , do_sample=__snake_case , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
lowerCamelCase_ =2 print('''Original generation:''' , __snake_case ) lowerCamelCase_ =processor.batch_decode(__snake_case , skip_special_tokens=__snake_case ) lowerCamelCase_ =[text.strip() for text in output_text] print('''HF generation:''' , __snake_case ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__snake_case ) hf_model.save_pretrained(__snake_case ) if push_to_hub: processor.push_to_hub(F'''Salesforce/{model_name}''' ) hf_model.push_to_hub(F'''Salesforce/{model_name}''' ) if __name__ == "__main__": a_ : Any = argparse.ArgumentParser() a_ : Any = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) a_ : str = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
75
1
"""Fine-tuning the library models for multiple choice (SWAG task): imports and argument dataclasses."""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

# NOTE(review): `main()` below logs through this name, so the module-level logger must be
# bound as `logger` (the obfuscated source bound it to `a_`, leaving `logger` undefined).
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune.

    Field names are reconstructed from the attribute accesses in ``main()``
    (``model_args.model_name_or_path``, ``model_args.cache_dir``, ...); the obfuscated
    source named every field ``lowercase`` and used an undefined ``lowerCamelCase__``
    as the default value, which raised NameError at class-creation time.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        """Validate that provided data files are csv or json (dataclass post-init hook)."""
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads multiple-choice inputs.

    Class and field names are reconstructed from their use sites: ``main()`` instantiates
    ``DataCollatorForMultipleChoice(tokenizer=..., pad_to_multiple_of=...)`` and ``__call__``
    reads ``self.tokenizer`` / ``self.padding`` / ``self.max_length`` / ``self.pad_to_multiple_of``.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # Flatten (batch, choices) -> (batch * choices) so the tokenizer can pad everything at once.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten back to (batch, choices, seq_len)
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels; torch.intaa in the mangled source is torch.int64.
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    """Fine-tune a multiple-choice model on SWAG (or user-provided csv/json data).

    Renamed from the mangled ``a_``: both ``_mp_fn`` and the ``__main__`` guard call ``main()``.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary (training_args.fpaa in the mangled source is fp16):
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub.
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )

    # Load pretrained model and tokenizer. The .from_pretrained methods guarantee that only one
    # local process can concurrently download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Repeat the context once per ending, pair it with each ending, flatten, tokenize,
        # then regroup the tokenized columns back into lists of 4 choices per example.
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric (np.floataa in the mangled source is float32)
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
75
'''simple docstring''' from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return 0.0 def a_ ( __snake_case : np.ndarray , __snake_case : int ) -> tuple[int | float, int | float]: """simple docstring""" lowerCamelCase_ =min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) lowerCamelCase_ =max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def a_ ( __snake_case : FilterType , __snake_case : int ) -> None: """simple docstring""" lowerCamelCase_ =512 lowerCamelCase_ =[1] + [0] * (size - 1) lowerCamelCase_ =[filter_type.process(__snake_case ) for item in inputs] lowerCamelCase_ =[0] * (samplerate - size) # zero-padding outputs += filler lowerCamelCase_ =np.abs(np.fft.fft(__snake_case ) ) lowerCamelCase_ =20 * np.logaa(__snake_case ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds lowerCamelCase_ =get_bounds(__snake_case , __snake_case ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(__snake_case ) plt.show() def a_ ( __snake_case : FilterType , __snake_case : int ) -> None: """simple docstring""" lowerCamelCase_ =512 lowerCamelCase_ =[1] + [0] * (size - 1) lowerCamelCase_ =[filter_type.process(__snake_case ) for item in inputs] lowerCamelCase_ =[0] * (samplerate - size) # zero-padding outputs += filler lowerCamelCase_ =np.angle(np.fft.fft(__snake_case ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(__snake_case , -2 * pi ) ) plt.show()
75
1
"""Checks and statistics for arithmetic (constant-difference) series."""


def is_arithmetic_series(series: list) -> bool:
    """Return True if ``series`` has a constant difference between consecutive terms.

    Raises:
        ValueError: if ``series`` is not a list or is empty.

    The mangled source checked ``isinstance(series, series)`` — the value against
    itself — which raised TypeError on every call; the error message ("valid
    series - [2, 4, 6]") grounds the intended ``list`` check. Both functions were
    also named ``a_`` so the second shadowed the first; names are restored from
    what each body computes.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        # A single element is trivially arithmetic.
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    Raises:
        ValueError: if ``series`` is not a list or is empty.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
"""Tokenization tests for the Funnel Transformer tokenizers."""
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class __UpperCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Funnel tokenizer test suite.

    The mangled source inherited from an undefined ``lowerCamelCase__`` (the otherwise
    unused ``TokenizerTesterMixin`` import grounds the fix), named every class attribute
    ``lowercase`` and every method ``lowercase__`` (so later definitions silently
    overwrote earlier ones). Attribute/method names are restored to the mixin's
    documented API; ``setUp`` is grounded by its ``super().setUp()`` call.
    """

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        # Minimal WordPiece vocabulary sufficient for the assertions below.
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        # do_lower_case=False reconstructed: the mangled source passed the undefined
        # name ``lowerCAmelCase`` here — TODO confirm against the upstream test.
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
75
1
"""Project Euler 89: savings from rewriting Roman numerals in minimal form."""
import os

# Bound as SYMBOLS: every function below reads SYMBOLS, but the mangled source
# assigned this dict to ``a_`` (which the first def immediately shadowed anyway).
SYMBOLS: dict[str, int] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Return the integer value of a (possibly non-minimal) Roman numeral string.

    A symbol smaller than its successor is subtracted (e.g. the I in IV),
    otherwise it is added.
    """
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Return the minimal-form Roman numeral for ``num`` (1 <= num <= 3999)."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return total characters saved by rewriting each numeral in the data file minimally.

    Function names are restored from their call sites (``solution()`` in the
    ``__main__`` guard, and the two helpers called here); ``os.path.dirname(__file__)``
    is reconstructed — the mangled source passed the filename argument to dirname,
    which cannot locate the data file shipped next to this script.
    """
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        value = parse_roman_numerals(original)
        minimal = generate_roman_numerals(value)
        savings += len(original) - len(minimal)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
75
"""Convert an original T5x Pix2Struct checkpoint to the HuggingFace format.

The mangled source digit-swapped every name: ``tax``/``load_tax_checkpoint`` are the
third-party ``t5x`` package, and the ``PixaStruct*`` classes are the ``Pix2Struct*``
classes actually exported by transformers. Function and constant names are restored
from their call sites (``get_flax_param``, ``rename_and_convert_flax_params``,
``CONVERSION_MAPPING``, ...), and the converter's parameter names from its body —
as written it had four parameters all named ``__snake_case``, a SyntaxError.
"""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    """Load a T5x checkpoint and return its parameters as a flat dict."""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    """Rename flattened T5x parameter keys to transformers names and convert to torch tensors."""
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format; T5x stores linear weights transposed,
    # so everything except embedding tables is transposed on the way in.
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Build a Pix2Struct HF model from a T5x checkpoint and save model + processor."""
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # Assignment targets reconstructed from the upstream conversion script — the
        # mangled source assigned 4096/True to throwaway locals. TODO confirm upstream.
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    # NOTE(review): is_vqa is parsed but not forwarded here, matching the original call
    # signature; callers relying on --is_vqa should pass it explicitly.
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
75
1
"""Agent tool that answers questions about a document (PDF page image) with a Donut model."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    """
    Document question answering tool backed by a Donut (vision-encoder-decoder)
    checkpoint fine-tuned on DocVQA. The tool takes a document image and a
    question and returns the textual answer.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Donut pre-processing needs PIL; fail early with a clear message.
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image.Image", question: str):
        """Build the Donut DocVQA prompt and pixel values for one (document, question) pair."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        # The task prompt already contains the special tokens Donut expects,
        # so the tokenizer must not add its own.
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Run greedy generation; returns the raw generated token sequences."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Decode generated ids, strip special tokens/prompt, and extract the answer field."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        # was `tokenajson` in the mangled source; the Donut processor method is `token2json`
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
75
"""Image processor class for CLIP-style models (resize, center crop, rescale, normalize)."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """
    Constructs a CLIP image processor.

    The pipeline is: optional RGB conversion, shortest-edge resize, center crop,
    rescale to [0, 1], and channel-wise normalization with the OpenAI CLIP
    mean/std defaults.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults match the original CLIP preprocessing: 224 shortest edge, 224x224 crop.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        # NOTE: the mangled original assigned these to a throwaway local, so the
        # instance never stored its configuration; restore the attribute writes.
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so that the shortest edge of `image` equals `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map uint8 to [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` channel-wise: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """
        Preprocess one image or a batch of images. Per-call arguments override the
        defaults stored on the processor; `None` means "use the instance default".
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
75
1
"""Unidirectional and bidirectional breadth-first search on a small occupancy grid.

Coordinates are given as (y, x). 0 cells are free, 1 cells are obstacles.
"""
from __future__ import annotations

import time

Path = list[tuple[int, int]]

# NOTE: the mangled source rebound all three module constants to `a_`,
# leaving `grid` and `delta` (referenced by the classes below) undefined.
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    """A search-tree node: grid position, goal position, and parent link for path retracing."""

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    """Plain FIFO breadth-first search from `start` to `goal` on the module grid."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        # Node takes (x, y) while start/goal are (y, x), hence the index swap.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        """Return the path from start to target, or [start] if the target is unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, obstacle-free neighbour nodes of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Follow parent links from `node` back to the start and return the forward path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    """Two simultaneous BFS frontiers (from start and from goal) that meet in the middle."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        """Expand both frontiers in lockstep until their current nodes coincide."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            # Retarget each search towards the other's current node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        """Join the forward path with the reversed backward path at the meeting node."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_bfs_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
75
"""Break a Caesar cipher by chi-squared scoring against English letter frequencies."""
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """
    Try every shift of the alphabet and score each candidate plaintext with the
    chi-squared statistic against expected letter frequencies; return the best.

    Args:
        ciphertext: The encrypted message.
        cipher_alphabet: Optional custom alphabet (defaults to lowercase a-z).
        frequencies_dict: Optional letter-frequency table (defaults to English).
        case_sensitive: If False, the ciphertext is lowercased before analysis.

    Returns:
        (most_likely_shift, its_chi_squared_value, decoded_message)

    >>> decrypt_caesar_with_chi_squared(
    ...     'dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!'
    ... )[0]
    7
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values, keyed by shift: (statistic, decoded message)
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )


# Backward-compatible alias for the mangled public name used elsewhere in this file set.
a_ = decrypt_caesar_with_chi_squared
75
1
"""Flax BigBird model tests: a config/inputs tester plus the common-test subclass."""
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds small BigBird configs and random inputs for the common Flax test suite."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) for one small batch."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repack prepare_config_and_inputs() output into the dict the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        # Attention probabilities are not returned by the block-sparse Flax attention,
        # so only run the common attention tests when explicitly enabled.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # Flax BigBird's block-sparse attention returns no attention probabilities,
        # so skip the attention comparison against the PyTorch model.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
75
"""Utilities to download, cache, and import community pipeline modules at runtime."""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """Return all diffusers release version strings from PyPI, sorted ascending."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """Create the dynamic-modules cache dir, make it a package, and add it to sys.path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create (recursively) a sub-package of the dynamic-modules cache named `name`."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Return the module names relatively imported (``import .x`` / ``from .x import``) by a file."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Return all files transitively needed through relative imports of `module_file`."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Raise ImportError if the file's top-level imports are missing; return its relative imports."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """Import `module_path` and return `class_name` from it (or auto-detect the pipeline class)."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the unique DiffusionPipeline subclass defined in `loaded_module`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """
    Locate `module_file` (local path, GitHub community pipeline, or Hub repo),
    copy it (and its relative imports) into the dynamic-modules cache, and
    return its path relative to that cache.
    """
    module_file_or_url = os.path.join(str(pretrained_model_name_or_path), module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif str(pretrained_model_name_or_path).count("/") == 0:
        # A bare name (no "/") refers to a community pipeline hosted on GitHub.
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = str(pretrained_model_name_or_path) + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(str(pretrained_model_name_or_path).split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(str(pretrained_model_name_or_path), module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Download/cache `module_file` and return `class_name` (or the pipeline class) from it."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
75
1
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record a_ : int = """\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } """ a_ : int = """\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. """ a_ : Tuple = """ Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for 'record': list of question-answer dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'prediction_text': the predicted answer text - for 'multirc': list of question-answer dictionaries with the following keys: - 'idx': index of the question-answer pair as specified by the dataset - 'prediction': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. 
Depending on the SuperGLUE subset: - for 'record': list of question-answers dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'answers': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for 'record': - 'exact_match': Exact match between answer and gold answer - 'f1': F1 score - for 'multirc': - 'exact_match': Exact match between answer and gold answer - 'f1_m': Per-question macro-F1 score - 'f1_a': Average F1 score over all answers - for 'axb': 'matthews_correlation': Matthew Correlation - for 'cb': - 'accuracy': Accuracy - 'f1': F1 score - for all others: - 'accuracy': Accuracy Examples: >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'cb') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'record') >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}] >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc') >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, 
references=references) >>> print(results) {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'axb') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'matthews_correlation': 1.0} """ def a_ ( __snake_case : Dict , __snake_case : Any ) -> int: """simple docstring""" return float((preds == labels).mean() ) def a_ ( __snake_case : List[Any] , __snake_case : Tuple , __snake_case : List[Any]="binary" ) -> List[str]: """simple docstring""" lowerCamelCase_ =simple_accuracy(__snake_case , __snake_case ) lowerCamelCase_ =float(fa_score(y_true=__snake_case , y_pred=__snake_case , average=__snake_case ) ) return { "accuracy": acc, "f1": fa, } def a_ ( __snake_case : Union[str, Any] , __snake_case : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ ={} for id_pred, label in zip(__snake_case , __snake_case ): lowerCamelCase_ =F'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}''' lowerCamelCase_ =id_pred['''prediction'''] if question_id in question_map: question_map[question_id].append((pred, label) ) else: lowerCamelCase_ =[(pred, label)] lowerCamelCase_, lowerCamelCase_ =[], [] for question, preds_labels in question_map.items(): lowerCamelCase_, lowerCamelCase_ =zip(*__snake_case ) lowerCamelCase_ =fa_score(y_true=__snake_case , y_pred=__snake_case , average='''macro''' ) fas.append(__snake_case ) lowerCamelCase_ =int(sum(pred == label for pred, label in preds_labels ) == len(__snake_case ) ) ems.append(__snake_case ) lowerCamelCase_ =float(sum(__snake_case ) / len(__snake_case ) ) lowerCamelCase_ =sum(__snake_case ) / len(__snake_case ) lowerCamelCase_ =float(fa_score(y_true=__snake_case , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) 
class __UpperCamelCase(datasets.Metric):
    """SuperGLUE metric: routes each configured subset to its scoring function.

    NOTE(review): the mangled original named all three methods ``lowercase__``
    (later defs shadow earlier ones) and gave them duplicate parameter names —
    a SyntaxError. ``_info`` itself calls ``self._get_feature_types()``, which
    pins that method's name; ``_info``/``_compute`` are the hooks the
    ``datasets.Metric`` base class invokes.
    """

    def _info(self):
        """Describe the metric (features, citation, docs) for the selected subset."""
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            # record/multirc inputs are nested dicts, so skip numpy formatting for them.
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        """Feature schema for predictions/references, depending on the subset."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        """Score ``predictions`` against ``references`` for the configured subset."""
        if self.config_name == "axb":
            # Matthews correlation is symmetric in its two arguments.
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            # Re-shape references into the qas structure the ReCoRD evaluator expects.
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
75
"""Hand-tuned denoising timestep schedules, each descending from 999 to 0.

NOTE(review): in the mangled original every list was bound to the same
throwaway name ``a_``, so only the last assignment survived at module scope.
Names are restored to the fast/smart/super-<length> convention implied by the
list lengths (27/27/50/100/185/27/40/100) — confirm against the upstream
module these tables were copied from.
"""

# 27 steps, "fast" spacing.
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]

# 27 steps, "smart" spacing.
smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]

# 50 steps, "smart" spacing.
smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]

# 100 steps, "smart" spacing.
smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]

# 185 steps, "smart" spacing.
smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]

# 27 steps, "super" spacing.
super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]

# 40 steps, "super" spacing.
super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]

# 100 steps, "super" spacing.
super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
75
1
'''simple docstring''' def a_ ( __snake_case : int = 400_0000 ) -> int: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_, lowerCamelCase_ =0, 1 while b <= n: if b % 2 == 0: even_fibs.append(__snake_case ) lowerCamelCase_, lowerCamelCase_ =b, a + b return sum(__snake_case ) if __name__ == "__main__": print(F"""{solution() = }""")
75
"""Lazy import structure for the Funnel Transformer model (configuration,
tokenizers, PyTorch and TensorFlow modeling classes).

NOTE(review): the mangled original bound every optional symbol list to a
throwaway name (``a_``) and then passed ``_import_structure`` — which was
never defined — to ``_LazyModule`` (NameError). The conditional
``_import_structure[...] = [...]`` updates and the ``sys.modules`` lazy-proxy
replacement are restored.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Symbols that are importable regardless of which backends are installed.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Fast tokenizer only when the `tokenizers` backend is available.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

# PyTorch modeling classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

# TensorFlow modeling classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backends are
    # only imported when one of their symbols is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
75
1
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
"""Top-level `datasets` package: environment checks, public API re-exports,
and backward-compatible patching of deprecated module locations.

NOTE(review): the mangled original assigned the version string and the seven
deprecated aliases to a throwaway name (``a_``), making them no-ops; the
``as _arrow_dataset`` / ``as _utils`` / ``as _deprecated_download_manager``
imports plus the trailing ``del`` of those modules show they were attribute
patches, restored below.
"""

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


# Hard minimum requirements: Python >= 3.7 and pyarrow >= 8.
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# Keep the old attribute locations importable for backward compatibility.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
75
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ ={ '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } lowerCamelCase_ ={ '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16_000, '''return_attention_mask''': False, '''do_normalize''': True, } lowerCamelCase_ =tempfile.mkdtemp() lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase_ =os.path.join(self.tmpdirname, lowerCAmelCase ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '''\n''' ) with open(self.feature_extraction_file, '''w''', 
encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '''\n''' ) # load decoder from hub lowerCamelCase_ ='''hf-internal-testing/ngram-beam-search-decoder''' def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.add_kwargs_tokens_map.copy() kwargs.update(lowerCAmelCase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowerCAmelCase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor, lowerCAmelCase ) # decoder self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, ) self.assertIsInstance(processor.decoder, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), 
decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha, 5.0 ) self.assertEqual(processor.language_model.beta, 3.0 ) self.assertEqual(processor.language_model.score_boundary, -7.0 ) self.assertEqual(processor.language_model.unk_score_offset, 3 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(lowerCAmelCase, '''include''' ): WavaVecaProcessorWithLM( tokenizer=lowerCAmelCase, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =floats_list((3, 1_000) ) lowerCamelCase_ =feature_extractor(lowerCAmelCase, return_tensors='''np''' ) lowerCamelCase_ =processor(lowerCAmelCase, return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ ='''This is a test string''' lowerCamelCase_ =processor(text=lowerCAmelCase ) lowerCamelCase_ =tokenizer(lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], 
encoded_processor[key] ) def lowercase__ ( self, lowerCAmelCase=(2, 10, 16), lowerCAmelCase=77 ): """simple docstring""" np.random.seed(lowerCAmelCase ) return np.random.rand(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits(shape=(10, 16), seed=13 ) lowerCamelCase_ =processor.decode(lowerCAmelCase ) lowerCamelCase_ =decoder.decode_beams(lowerCAmelCase )[0] self.assertEqual(decoded_decoder[0], decoded_processor.text ) self.assertEqual('''</s> <s> </s>''', decoded_processor.text ) self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: lowerCamelCase_ =processor.batch_decode(lowerCAmelCase ) else: with get_context(lowerCAmelCase ).Pool() as pool: lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =list(lowerCAmelCase ) with get_context('''fork''' ).Pool() as p: lowerCamelCase_ =decoder.decode_beams_batch(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =[], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(lowerCAmelCase, decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text ) self.assertListEqual(lowerCAmelCase, decoded_processor.logit_score ) self.assertListEqual(lowerCAmelCase, decoded_processor.lm_score ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =15 lowerCamelCase_ =-2_0.0 lowerCamelCase_ =-4.0 lowerCamelCase_ =processor.batch_decode( lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, ) lowerCamelCase_ =decoded_processor_out.text lowerCamelCase_ =list(lowerCAmelCase ) with get_context('''fork''' ).Pool() as pool: lowerCamelCase_ =decoder.decode_beams_batch( lowerCAmelCase, lowerCAmelCase, beam_width=lowerCAmelCase, beam_prune_logp=lowerCAmelCase, token_min_logp=lowerCAmelCase, ) lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out] lowerCamelCase_ =[d[0][2] for d in decoded_decoder_out] lowerCamelCase_ =[d[0][3] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], 
lowerCAmelCase ) self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7], lowerCAmelCase, atol=1e-3 ) ) self.assertTrue(np.array_equal(lowerCAmelCase, decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4], lowerCAmelCase, atol=1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =2.0 lowerCamelCase_ =5.0 lowerCamelCase_ =-2_0.0 lowerCamelCase_ =True lowerCamelCase_ =processor.batch_decode( lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, ) lowerCamelCase_ =decoded_processor_out.text lowerCamelCase_ =list(lowerCAmelCase ) decoder.reset_params( alpha=lowerCAmelCase, beta=lowerCAmelCase, unk_score_offset=lowerCAmelCase, lm_score_boundary=lowerCAmelCase, ) with get_context('''fork''' ).Pool() as pool: lowerCamelCase_ =decoder.decode_beams_batch( lowerCAmelCase, lowerCAmelCase, ) lowerCamelCase_ =[d[0][0] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], lowerCAmelCase ) lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha, 2.0 ) self.assertEqual(lm_model.beta, 5.0 ) self.assertEqual(lm_model.unk_score_offset, -2_0.0 ) self.assertEqual(lm_model.score_boundary, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ 
=processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =snapshot_download('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =processor.decoder.model_container[processor.decoder._model_key] lowerCamelCase_ =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =os.listdir(lowerCAmelCase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =floats_list((3, 1_000) ) lowerCamelCase_ =processor_wavaveca(lowerCAmelCase, return_tensors='''np''' ) lowerCamelCase_ =processor_auto(lowerCAmelCase, return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =processor_wavaveca.batch_decode(lowerCAmelCase ) lowerCamelCase_ =processor_auto.batch_decode(lowerCAmelCase ) 
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_feature_extractor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_decoder() lowerCamelCase_ =WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase, feature_extractor=lowerCAmelCase, decoder=lowerCAmelCase ) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', ) @staticmethod def lowercase__ ( lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[d[key] for d in offsets] return retrieved_list def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =self._get_dummy_logits()[0] lowerCamelCase_ =processor.decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) lowerCamelCase_ =self._get_dummy_logits() lowerCamelCase_ =processor.batch_decode(lowerCAmelCase, output_word_offsets=lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word 
self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(lowerCAmelCase, lowerCAmelCase ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowercase__ ( self ): """simple docstring""" import torch lowerCamelCase_ =load_dataset('''common_voice''', '''en''', split='''train''', streaming=lowerCAmelCase ) lowerCamelCase_ =ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000 ) ) lowerCamelCase_ =iter(lowerCAmelCase ) lowerCamelCase_ =next(lowerCAmelCase ) lowerCamelCase_ =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) lowerCamelCase_ =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train lowerCamelCase_ =processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase ).logits.cpu().numpy() lowerCamelCase_ =processor.decode(logits[0], output_word_offsets=lowerCAmelCase ) lowerCamelCase_ =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate lowerCamelCase_ =[ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] lowerCamelCase_ ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS 
TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), lowerCAmelCase ) self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase, '''word''' ) ), output.text ) # output times lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''start_time''' ) ) lowerCamelCase_ =torch.tensor(self.get_from_offsets(lowerCAmelCase, '''end_time''' ) ) # fmt: off lowerCamelCase_ =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) lowerCamelCase_ =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) ) self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=0.0_1 ) )
75
1