Dataset schema (one row per example; rows follow below as labeled fields):

- code                     : string, 81 to 54k characters
- code_codestyle           : int64, 0 to 721
- style_context            : string, 91 to 41.9k characters
- style_context_codestyle  : int64, 0 to 699
- label                    : int64, 0 or 1
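As a minimal sketch of how a dataset with this schema could be loaded and inspected with the `datasets` library (the repository id `user/code-style-pairs` is a hypothetical placeholder, not taken from this page):

from datasets import load_dataset

# hypothetical repository id; substitute the actual dataset path
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code field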
code:

import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase, strip non-letters, and insert an X between duplicate letters
    so that every digram is well-formed; pad odd-length input with a final X."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
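A quick round-trip check of the functions above (key and message are arbitrary; note that decoding returns the *prepared* text, with an X appended to odd-length input, rather than the raw message):

key = "MONARCHY"
message = "HIDE THE GOLD"

ciphertext = encode(message, key)
print(ciphertext)
print(decode(ciphertext, key))  # 'HIDETHEGOLDX' -- prepare_input() added the trailing X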
code_codestyle: 703

style_context:
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
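The `_LazyModule` swap at the bottom means the torch-backed submodule is only imported when one of its names is first touched. A sketch of the effect, assuming a transformers release that ships MRA:

from transformers import MraConfig   # cheap: resolves from the config module only
config = MraConfig(hidden_size=256)

from transformers import MraModel    # first access triggers the torch-dependent import
model = MraModel(config)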
style_context_codestyle: 52
label: 0

code:
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check whether the digit tuple has the substring divisibility property."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum the 0-to-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
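A spot check against the worked example in the Project Euler 43 statement, where 1406357289 is given as a pandigital number with exactly this property:

digits = tuple(int(d) for d in "1406357289")
print(is_substring_divisible(digits))  # True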
code_codestyle: 704

style_context:
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
style_context_codestyle: 52
label: 0

code:
def solution(min_total: int = 10**12) -> int:
    # Pell-like recurrence stepping between consecutive
    # (numerator, denominator) solutions of the underlying equation.
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
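If this is the Project Euler 100 recurrence (as the structure suggests), the small case quoted in the problem statement checks out: after the 15-blue / 21-total arrangement, the next arrangement is 85 blue of 120 total, so asking for the first total above 21 yields 85:

print(solution(21))  # 85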
code_codestyle: 705

style_context:
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
style_context_codestyle: 52
label: 0

code:
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 706

style_context:
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger SCREAMING_SNAKE_CASE__ = get_logger(__name__) SCREAMING_SNAKE_CASE__ = r''' Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. ''' class __lowerCAmelCase : """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class __lowerCAmelCase : """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" @add_start_docstrings(_snake_case ) def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ): """simple docstring""" for processor in self: A__ = inspect.signature(processor.__call__ ).parameters if len(_snake_case ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' F'''{processor.__class__} are passed to the logits processor.''' ) A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case ) else: A__ = processor(_snake_case , _snake_case , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : float ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or not (temperature > 0): raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' ) A__ = temperature def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = scores / self.temperature return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0): raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1): raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) A__ = top_p A__ = filter_value A__ = min_tokens_to_keep def __call__( self : str , _snake_case : jnp.ndarray , _snake_case 
: jnp.ndarray , _snake_case : int ): """simple docstring""" A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] ) A__ = jnp.full_like(_snake_case , self.filter_value ) A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 ) A__ = cumulative_probs < self.top_p # include the token that is higher than top_p as well A__ = jnp.roll(_snake_case , 1 ) score_mask |= score_mask.at[:, 0].set(_snake_case ) # min tokens to keep A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case ) A__ = jnp.where(_snake_case , _snake_case , _snake_case ) A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1] return next_scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or top_k <= 0: raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) A__ = max(_snake_case , _snake_case ) A__ = filter_value def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ , A__ = scores.shape A__ = jnp.full(batch_size * vocab_size , self.filter_value ) A__ = min(self.top_k , scores.shape[-1] ) # Safety check A__ , A__ = lax.top_k(_snake_case , _snake_case ) A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() A__ = topk_scores.flatten() A__ = topk_indices.flatten() + shift A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case ) A__ = next_scores_flat.reshape(_snake_case , _snake_case ) return next_scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : int ): """simple docstring""" A__ = bos_token_id def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = jnp.full(scores.shape , -float('inf' ) ) A__ = 1 - jnp.bool_(cur_len - 1 ) A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Any , _snake_case : int , _snake_case : int ): """simple docstring""" A__ = max_length A__ = eos_token_id def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = jnp.full(scores.shape , -float('inf' ) ) A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 ) A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Dict , _snake_case : int , _snake_case : int ): """simple docstring""" if not isinstance(_snake_case , _snake_case ) or min_length < 0: raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0: raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) A__ = min_length A__ = eos_token_id def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): 
"""simple docstring""" def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ): """simple docstring""" A__ = list(_snake_case ) A__ = begin_index def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ): """simple docstring""" A__ = 1 - jnp.bool_(cur_len - self.begin_index ) A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : int , _snake_case : list ): """simple docstring""" A__ = list(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : List[str] , _snake_case : Optional[Any] ): """simple docstring""" A__ = dict(_snake_case ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: A__ = force_token_array.at[index].set(_snake_case ) A__ = jnp.intaa(_snake_case ) def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ): """simple docstring""" def _force_token(_snake_case : Dict ): A__ = scores.shape[0] A__ = self.force_token_array[generation_idx] A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' ) A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) ) return new_scores A__ = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , ) return scores class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ): """simple docstring""" A__ = generate_config.eos_token_id A__ = generate_config.no_timestamps_token_id A__ = generate_config.no_timestamps_token_id + 1 A__ = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(_snake_case , 'max_initial_timestamp_index' ): A__ = generate_config.max_initial_timestamp_index else: A__ = model_config.vocab_size if self.max_initial_timestamp_index is None: A__ = model_config.vocab_size def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ): """simple docstring""" A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) ) def handle_pairs(_snake_case : Dict , _snake_case : str ): A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case ) A__ = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , ) A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case ) A__ = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , ) return jnp.where( _snake_case , jnp.where( 
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , ) A__ = jax.vmap(_snake_case )(_snake_case , _snake_case ) A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case ) A__ = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , ) A__ = self.timestamp_begin + self.max_initial_timestamp_index A__ = jnp.where( _snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , ) # if sum of probability over timestamps is above any other token, sample timestamp A__ = jax.nn.log_softmax(_snake_case , axis=-1 ) def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ): A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) A__ = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , ) A__ = jax.vmap(_snake_case )(_snake_case , _snake_case ) return scores
style_context_codestyle: 52
label: 0

code:
import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger('''transformers.models.encodec''') SCREAMING_SNAKE_CASE__ = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } SCREAMING_SNAKE_CASE__ = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } SCREAMING_SNAKE_CASE__ = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', 
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } SCREAMING_SNAKE_CASE__ = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } SCREAMING_SNAKE_CASE__ = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } SCREAMING_SNAKE_CASE__ = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } SCREAMING_SNAKE_CASE__ = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any: for attribute in key.split('.' 
): A__ = getattr(__UpperCamelCase , __UpperCamelCase ) if weight_type is not None: A__ = getattr(__UpperCamelCase , __UpperCamelCase ).shape else: A__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A__ = value elif weight_type == "weight_g": A__ = value elif weight_type == "weight_v": A__ = value elif weight_type == "bias": A__ = value elif weight_type == "running_mean": A__ = value elif weight_type == "running_var": A__ = value elif weight_type == "num_batches_tracked": A__ = value elif weight_type == "weight_ih_l0": A__ = value elif weight_type == "weight_hh_l0": A__ = value elif weight_type == "bias_ih_l0": A__ = value elif weight_type == "bias_hh_l0": A__ = value elif weight_type == "weight_ih_l1": A__ = value elif weight_type == "weight_hh_l1": A__ = value elif weight_type == "bias_ih_l1": A__ = value elif weight_type == "bias_hh_l1": A__ = value else: A__ = value logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: A__ , A__ = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any: A__ = [] if model_name == "encodec_24khz" or "encodec_32khz": A__ = MAPPING_24K elif model_name == "encodec_48khz": A__ = MAPPING_48K else: raise ValueError(f'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(__UpperCamelCase , __UpperCamelCase ): logger.info(f'''{name} was ignored''' ) continue A__ = False for key, mapped_key in MAPPING.items(): if "*" in key: A__ , A__ = key.split('.*.' ) if prefix in name and suffix in name: A__ = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('embed' ) and name.endswith('embed_avg' ): continue A__ = True if "*" in mapped_key: A__ = name.split(__UpperCamelCase )[0].split('.' 
)[-2] A__ = mapped_key.replace('*' , __UpperCamelCase ) if "weight_g" in name: A__ = 'weight_g' elif "weight_v" in name: A__ = 'weight_v' elif "weight_ih_l0" in name: A__ = 'weight_ih_l0' elif "weight_hh_l0" in name: A__ = 'weight_hh_l0' elif "bias_ih_l0" in name: A__ = 'bias_ih_l0' elif "bias_hh_l0" in name: A__ = 'bias_hh_l0' elif "weight_ih_l1" in name: A__ = 'weight_ih_l1' elif "weight_hh_l1" in name: A__ = 'weight_hh_l1' elif "bias_ih_l1" in name: A__ = 'bias_ih_l1' elif "bias_hh_l1" in name: A__ = 'bias_hh_l1' elif "bias" in name: A__ = 'bias' elif "weight" in name: A__ = 'weight' elif "running_mean" in name: A__ = 'running_mean' elif "running_var" in name: A__ = 'running_var' elif "num_batches_tracked" in name: A__ = 'num_batches_tracked' else: A__ = None set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) @torch.no_grad() def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , ) -> int: if config_path is not None: A__ = EncodecConfig.from_pretrained(__UpperCamelCase ) else: A__ = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": A__ = [8, 5, 4, 4] A__ = [2.2] A__ = 64 A__ = 32_000 A__ = 2_048 A__ = False A__ = False A__ = False elif model_name == "encodec_48khz": A__ = [8, 5, 4, 2] A__ = [3.0, 6.0, 12.0, 24.0] A__ = 48_000 A__ = 2 A__ = False A__ = 'time_group_norm' A__ = True A__ = 1.0 A__ = 0.01 else: raise ValueError(f'''Unknown model name: {model_name}''' ) A__ = EncodecModel(__UpperCamelCase ) A__ = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(__UpperCamelCase ) A__ = torch.load(__UpperCamelCase ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights A__ = original_checkpoint['best_state'] recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) if repo_id: print('Pushing to the hub...' ) feature_extractor.push_to_hub(__UpperCamelCase ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
code_codestyle: 707

style_context:
import argparse
import struct
import unittest


class SHA256:
    """Class to contain the entire pipeline of the SHA-256 hashing algorithm."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]

        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1,
            0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786,
            0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147,
            0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B,
            0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A,
            0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad with a 1 bit, zeros, and the 64-bit big-endian message length
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bit positions."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
style_context_codestyle: 52
label: 0

code:
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]: return (preds == labels).mean() @dataclass class __lowerCAmelCase : """simple docstring""" A__ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) A__ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class __lowerCAmelCase : """simple docstring""" A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) A__ : str = field(metadata={"help": "Should contain the data files for the task."} ) A__ : int = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ : bool = field( default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def A ( ) -> Any: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A__ , A__ , A__ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , __UpperCamelCase ) # Set seed set_seed(training_args.seed ) try: A__ = processors[data_args.task_name]() A__ = processor.get_labels() A__ = len(__UpperCamelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) A__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) A__ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , ) # Get datasets A__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) A__ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__UpperCamelCase ) -> Dict: A__ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(__UpperCamelCase , p.label_ids )} # Data collator A__ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer A__ = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , compute_metrics=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A__ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) A__ = trainer.evaluate() A__ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(__UpperCamelCase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): 
logger.info(' %s = %s' , __UpperCamelCase , __UpperCamelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(__UpperCamelCase ) return results def A ( __UpperCamelCase ) -> List[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
code_codestyle: 708

style_context:
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative if `value` is already an activation."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random starting weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
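A small numeric check of the `deriv` flag above, which expects the sigmoid activation itself (not the pre-activation) as input:

activation = sigmoid_function(0.5)          # about 0.6225
print(sigmoid_function(activation, True))   # activation * (1 - activation), about 0.2350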
style_context_codestyle: 52
label: 0

code:
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
code_codestyle: 709

style_context:
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
style_context_codestyle: 52
label: 0

code:
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
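Tracing the `__main__` block above from source vertex G (BFS records the parent links C from G, A from C, B from A, D from B), the expected behaviour is:

g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))  # G->C->A->B->D
print(g.shortest_path("G"))  # G
# g.shortest_path("Foo") raises ValueError: no path from G to Foo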
code_codestyle: 710

style_context:
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
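A minimal sketch of the constructor defaults above, which mirror the roberta-base checkpoint:

config = RobertaConfig()
print(config.vocab_size, config.hidden_size, config.num_hidden_layers)  # 50265 768 12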
style_context_codestyle: 52
label: 0

code:
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
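A sketch of how the helper at the bottom is meant to be called from elsewhere inside the same package, assuming the pinned spec for the package lives in `dependency_versions_table.py`:

from .dependency_versions_check import dep_version_check

dep_version_check("tqdm")  # raises if the installed tqdm violates the pinned version range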
711
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : int = LongformerTokenizer A__ : Optional[int] = True A__ : Any = LongformerTokenizerFast A__ : Dict = True def _a ( self : int ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) ) A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A__ = {'unk_token': '<unk>'} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_snake_case ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_snake_case ) ) def _a ( self : int , **_snake_case : Union[str, Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Optional[int] , **_snake_case : List[Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case ) def _a ( self : Any , _snake_case : Optional[Any] ): """simple docstring""" A__ = 'lower newer' A__ = 'lower newer' return input_text, output_text def _a ( self : Any ): """simple docstring""" A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) A__ = 'lower newer' A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] A__ = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True) self.assertListEqual(_snake_case , _snake_case ) A__ = tokens + [tokenizer.unk_token] A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case ) def _a ( self : List[str] ): """simple docstring""" A__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def _a ( self : List[Any] ): """simple docstring""" A__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case ) A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case ) A__ = tokenizer.encode( 'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case ) A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _a ( self : List[str] ): """simple docstring""" A__ = self.get_tokenizer() A__ = 'Encode this sequence.' A__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_snake_case , _snake_case ) A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_snake_case , _snake_case ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_snake_case , _snake_case ) # Testing spaces after special tokens A__ = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space A__ = tokenizer.convert_tokens_to_ids(_snake_case ) A__ = 'Encode <mask> sequence' A__ = 'Encode <mask>sequence' A__ = tokenizer.encode(_snake_case ) A__ = encoded.index(_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_snake_case , _snake_case ) A__ = tokenizer.encode(_snake_case ) A__ = encoded.index(_snake_case ) A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_snake_case , _snake_case ) def _a ( self : Dict ): """simple docstring""" pass def _a ( self : Union[str, Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case ) A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case ) A__ = 'A, <mask> AllenNLP sentence.' 
A__ = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) A__ = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def _a ( self : List[Any] ): """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): A__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['trim_offsets'] , _snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` A__ = F'''{text_of_1_token} {text_of_1_token}''' A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = 
self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) A__ = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
52
0
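# The code sample in this row checks pinned dependency versions at import time.
# A minimal sketch of that runtime-check pattern using only the stdlib plus the
# `packaging` library (the helper name `check_requirement` is our own, not the
# library's API):
from importlib.metadata import version

from packaging.requirements import Requirement


def check_requirement(spec: str) -> None:
    req = Requirement(spec)        # e.g. "tqdm>=4.27"
    installed = version(req.name)  # raises PackageNotFoundError if missing
    if not req.specifier.contains(installed, prereleases=True):
        raise ImportError(f"{req.name}=={installed} does not satisfy {spec!r}")


check_requirement("packaging>=20.0")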
'''simple docstring''' from __future__ import annotations from typing import Any class __lowerCAmelCase : """simple docstring""" def __init__( self : str , _snake_case : int , _snake_case : int , _snake_case : float = 0 ): """simple docstring""" A__ , A__ = row, column A__ = [[default_value for c in range(_snake_case )] for r in range(_snake_case )] def __str__( self : List[Any] ): """simple docstring""" A__ = F'''Matrix consist of {self.row} rows and {self.column} columns\n''' # Make string identifier A__ = 0 for row_vector in self.array: for obj in row_vector: A__ = max(_snake_case , len(str(_snake_case ) ) ) A__ = F'''%{max_element_length}s''' # Make string and return def single_line(_snake_case : list[float] ) -> str: nonlocal string_format_identifier A__ = '[' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(_snake_case ) for row_vector in self.array ) return s def __repr__( self : Union[str, Any] ): """simple docstring""" return str(self ) def _a ( self : List[Any] , _snake_case : tuple[int, int] ): """simple docstring""" if not (isinstance(_snake_case , (list, tuple) ) and len(_snake_case ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : List[str] , _snake_case : tuple[int, int] ): """simple docstring""" assert self.validate_indicies(_snake_case ) return self.array[loc[0]][loc[1]] def __setitem__( self : Dict , _snake_case : tuple[int, int] , _snake_case : float ): """simple docstring""" assert self.validate_indicies(_snake_case ) A__ = value def __add__( self : Optional[int] , _snake_case : Matrix ): """simple docstring""" assert isinstance(_snake_case , _snake_case ) assert self.row == another.row and self.column == another.column # Add A__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A__ = self[r, c] + another[r, c] return result def __neg__( self : Tuple ): """simple docstring""" A__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A__ = -self[r, c] return result def __sub__( self : Tuple , _snake_case : Matrix ): """simple docstring""" return self + (-another) def __mul__( self : str , _snake_case : int | float | Matrix ): """simple docstring""" if isinstance(_snake_case , (int, float) ): # Scalar multiplication A__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A__ = self[r, c] * another return result elif isinstance(_snake_case , _snake_case ): # Matrix multiplication assert self.column == another.row A__ = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: A__ = F'''Unsupported type given for another ({type(_snake_case )})''' raise TypeError(_snake_case ) def _a ( self : List[Any] ): """simple docstring""" A__ = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): A__ = self[r, c] return result def _a ( self : List[Any] , _snake_case : Matrix , _snake_case : Matrix ): """simple docstring""" assert isinstance(_snake_case , _snake_case ) and isinstance(_snake_case , _snake_case ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate A__ = v.transpose() A__ = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: 
return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def A ( ) -> None: # a^(-1) A__ = Matrix(3 , 3 , 0 ) for i in range(3 ): A__ = 1 print(f'''a^(-1) is {ainv}''' ) # u, v A__ = Matrix(3 , 1 , 0 ) A__ , A__ , A__ = 1, 2, -3 A__ = Matrix(3 , 1 , 0 ) A__ , A__ , A__ = 4, -2, 5 print(f'''u is {u}''' ) print(f'''v is {v}''' ) print(f'''uv^T is {u * v.transpose()}''' ) # Sherman Morrison print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(__UpperCamelCase , __UpperCamelCase )}''' ) def A ( ) -> None: import doctest doctest.testmod() testa()
712
import pytest import datasets # Import fixture modules as plugins SCREAMING_SNAKE_CASE__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec'''] def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def A ( __UpperCamelCase ) -> str: config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=__UpperCamelCase ) def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? A__ = tmp_path_factory.getbasetemp() / 'cache' A__ = test_hf_cache_home / 'datasets' A__ = test_hf_cache_home / 'metrics' A__ = test_hf_cache_home / 'modules' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(__UpperCamelCase ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(__UpperCamelCase ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(__UpperCamelCase ) ) A__ = test_hf_datasets_cache / 'downloads' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(__UpperCamelCase ) ) A__ = test_hf_datasets_cache / 'downloads' / 'extracted' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__UpperCamelCase ) ) @pytest.fixture(autouse=__UpperCamelCase , scope='session' ) def A ( ) -> Union[str, Any]: datasets.disable_progress_bar() @pytest.fixture(autouse=__UpperCamelCase ) def A ( __UpperCamelCase ) -> int: # don't take tests into account when counting downloads monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , __UpperCamelCase ) @pytest.fixture def A ( __UpperCamelCase ) -> Any: # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 # To be removed once SQLAlchemy 2.0 supported monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , __UpperCamelCase )
52
0
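# The Matrix sample above applies the Sherman-Morrison update
# (A + u v^T)^{-1} = A^{-1} - (A^{-1} u v^T A^{-1}) / (1 + v^T A^{-1} u).
# A quick NumPy cross-check of that identity, assuming the update stays
# nonsingular (variable names are illustrative):
import numpy as np

rng = np.random.default_rng(0)
a_inv = np.linalg.inv(np.eye(3) + 0.1 * rng.standard_normal((3, 3)))
u = rng.standard_normal((3, 1))
v = rng.standard_normal((3, 1))

denom = 1.0 + (v.T @ a_inv @ u).item()
sherman_morrison = a_inv - (a_inv @ u @ v.T @ a_inv) / denom
direct = np.linalg.inv(np.linalg.inv(a_inv) + u @ v.T)
assert np.allclose(sherman_morrison, direct)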
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def _a ( self : Optional[Any] ): """simple docstring""" torch.manual_seed(0 ) A__ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def _a ( self : List[str] ): """simple docstring""" A__ = self.dummy_uncond_unet A__ = ScoreSdeVeScheduler() A__ = ScoreSdeVePipeline(unet=_snake_case , scheduler=_snake_case ) sde_ve.to(_snake_case ) sde_ve.set_progress_bar_config(disable=_snake_case ) A__ = torch.manual_seed(0 ) A__ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=_snake_case ).images A__ = torch.manual_seed(0 ) A__ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=_snake_case , return_dict=_snake_case )[ 0 ] A__ = image[0, -3:, -3:, -1] A__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A__ = 'google/ncsnpp-church-256' A__ = UNetaDModel.from_pretrained(_snake_case ) A__ = ScoreSdeVeScheduler.from_pretrained(_snake_case ) A__ = ScoreSdeVePipeline(unet=_snake_case , scheduler=_snake_case ) sde_ve.to(_snake_case ) sde_ve.set_progress_bar_config(disable=_snake_case ) A__ = torch.manual_seed(0 ) A__ = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=_snake_case ).images A__ = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) A__ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
713
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple: A__ = args.log_outputs A__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric A__ = load_metric('wer' ) A__ = load_metric('cer' ) # compute metrics A__ = wer.compute(references=result['target'] , predictions=result['prediction'] ) A__ = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results A__ = f'''WER: {wer_result}\nCER: {cer_result}''' print(__UpperCamelCase ) with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f: f.write(__UpperCamelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f'''log_{dataset_id}_predictions.txt''' A__ = f'''log_{dataset_id}_targets.txt''' with open(__UpperCamelCase , 'w' ) as p, open(__UpperCamelCase , 'w' ) as t: # mapping function to write output def write_to_file(__UpperCamelCase , __UpperCamelCase ): p.write(f'''{i}''' + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f'''{i}''' + '\n' ) t.write(batch['target'] + '\n' ) result.map(__UpperCamelCase , with_indices=__UpperCamelCase ) def A ( __UpperCamelCase ) -> str: A__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(__UpperCamelCase , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: A__ = ' '.join(text.split(__UpperCamelCase ) ) return text def A ( __UpperCamelCase ) -> Union[str, Any]: # load dataset A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__UpperCamelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('audio' , Audio(sampling_rate=__UpperCamelCase ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(__UpperCamelCase ): A__ = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['text'] A__ = normalize_text(batch['sentence'] ) return batch # run inference on all examples A__ = dataset.map(__UpperCamelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. 
*E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) SCREAMING_SNAKE_CASE__ = parser.parse_args() main(args)
52
0
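# The ASR evaluation script above reports WER through `datasets.load_metric`;
# as a dependency-free sketch, word error rate is word-level edit distance
# divided by reference length (the function name `word_error_rate` is ours):
def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(
                d[i - 1][j] + 1,         # deletion
                d[i][j - 1] + 1,         # insertion
                d[i - 1][j - 1] + cost,  # substitution (or match)
            )
    return d[len(ref)][len(hyp)] / len(ref)


assert word_error_rate("the cat sat", "the cat sat") == 0.0
assert word_error_rate("the cat sat", "the bat sat") == 1 / 3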
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger SCREAMING_SNAKE_CASE__ = '''<<<<<<< This should probably be modified because it mentions: ''' SCREAMING_SNAKE_CASE__ = '''======= >>>>>>> ''' SCREAMING_SNAKE_CASE__ = [ '''TextEncoderConfig''', '''ByteTextEncoder''', '''SubwordTextEncoder''', '''encoder_config''', '''maybe_build_from_corpus''', '''manual_dir''', ] SCREAMING_SNAKE_CASE__ = [ # (pattern, replacement) # Order is important here for some replacements (r'''tfds\.core''', r'''datasets'''), (r'''tf\.io\.gfile\.GFile''', r'''open'''), (r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''), (r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''), (r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''), (r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''), (r'''tfds\.features\.FeaturesDict\(''', r'''dict('''), (r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''), (r'''tfds\.''', r'''datasets.'''), (r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''), (r'''self\.builder_config''', r'''self.config'''), ] def A ( __UpperCamelCase ) -> Optional[Any]: return ConvertCommand(args.tfds_path , args.datasets_directory ) class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" @staticmethod def _a ( _snake_case : ArgumentParser ): """simple docstring""" A__ = parser.add_parser( 'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , ) train_parser.add_argument( '--tfds_path' , type=_snake_case , required=_snake_case , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , ) train_parser.add_argument( '--datasets_directory' , type=_snake_case , required=_snake_case , help='Path to the HuggingFace Datasets folder.' ) train_parser.set_defaults(func=_snake_case ) def __init__( self : str , _snake_case : str , _snake_case : str , *_snake_case : Dict ): """simple docstring""" A__ = get_logger('datasets-cli/converting' ) A__ = tfds_path A__ = datasets_directory def _a ( self : Dict ): """simple docstring""" if os.path.isdir(self._tfds_path ): A__ = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): A__ = os.path.dirname(self._tfds_path ) else: raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' 
) A__ = os.path.abspath(self._datasets_directory ) self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) A__ = [] A__ = [] A__ = {} if os.path.isdir(self._tfds_path ): A__ = os.listdir(_snake_case ) else: A__ = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F'''Looking at file {f_name}''' ) A__ = os.path.join(_snake_case , _snake_case ) A__ = os.path.join(_snake_case , _snake_case ) if not os.path.isfile(_snake_case ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('Skipping file' ) continue with open(_snake_case , encoding='utf-8' ) as f: A__ = f.readlines() A__ = [] A__ = False A__ = False A__ = [] for line in lines: A__ = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: A__ = 'import datasets\n' elif "import tensorflow" in out_line: # order is important here A__ = '' continue elif "from absl import logging" in out_line: A__ = 'from datasets import logging\n' elif "getLogger" in out_line: A__ = out_line.replace('getLogger' , 'get_logger' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): A__ = True A__ = list(filter(lambda _snake_case : e in out_line , _snake_case ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_snake_case ) + '\n' ) out_lines.append(_snake_case ) out_lines.append(_snake_case ) continue else: for pattern, replacement in TO_CONVERT: A__ = re.sub(_snake_case , _snake_case , _snake_case ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: A__ = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , _snake_case ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) ) A__ = 'from . import ' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: A__ = True out_lines.append(_snake_case ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset A__ = f_name.replace('.py' , '' ) A__ = os.path.join(_snake_case , _snake_case ) A__ = os.path.join(_snake_case , _snake_case ) os.makedirs(_snake_case , exist_ok=_snake_case ) self._logger.info(F'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(_snake_case ) if needs_manual_update: with_manual_update.append(_snake_case ) with open(_snake_case , 'w' , encoding='utf-8' ) as f: f.writelines(_snake_case ) self._logger.info(F'''Converted in {output_file}''' ) for utils_file in utils_files: try: A__ = os.path.basename(_snake_case ) A__ = imports_to_builder_map[f_name.replace('.py' , '' )] self._logger.info(F'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(_snake_case , _snake_case ) except KeyError: self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
714
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) def A ( __UpperCamelCase ) -> YolosConfig: A__ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: A__ = 192 A__ = 768 A__ = 12 A__ = 3 A__ = [800, 1_333] A__ = False elif yolos_name == "yolos_s_dWr": A__ = 330 A__ = 14 A__ = 6 A__ = 1_320 elif "yolos_s" in yolos_name: A__ = 384 A__ = 1_536 A__ = 12 A__ = 6 elif "yolos_b" in yolos_name: A__ = [800, 1_344] A__ = 91 A__ = 'huggingface/label-files' A__ = 'coco-detection-id2label.json' A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) ) A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} return config def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[: config.hidden_size, :] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[-config.hidden_size :, :] A__ = in_proj_bias[-config.hidden_size :] def A ( __UpperCamelCase ) -> str: if "backbone" in name: A__ = name.replace('backbone' , 'vit' ) if "cls_token" in name: A__ = name.replace('cls_token' , 'embeddings.cls_token' ) if "det_token" in name: A__ = name.replace('det_token' , 'embeddings.detection_tokens' ) if "mid_pos_embed" in name: A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' ) if "pos_embed" in name: A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "blocks" in name: A__ = name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: A__ = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: A__ = name.replace('attn' , 'attention.self' ) if "norm1" in name: A__ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: A__ = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: A__ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: A__ = name.replace('mlp.fc2' , 'output.dense' ) if "class_embed" in name: A__ = name.replace('class_embed' , 'class_labels_classifier' ) if "bbox_embed" in name: A__ = name.replace('bbox_embed' , 'bbox_predictor' ) if "vit.norm" in name: A__ = name.replace('vit.norm' , 'vit.layernorm' ) return name def A ( __UpperCamelCase , __UpperCamelCase ) -> dict: for key in orig_state_dict.copy().keys(): A__ = orig_state_dict.pop(__UpperCamelCase ) if "qkv" in key: A__ = key.split('.' 
) A__ = int(key_split[2] ) A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: A__ = val[:dim, :] A__ = val[ dim : dim * 2, : ] A__ = val[-dim:, :] else: A__ = val[:dim] A__ = val[dim : dim * 2] A__ = val[-dim:] else: A__ = val return orig_state_dict def A ( ) -> torch.Tensor: A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]: A__ = get_yolos_config(__UpperCamelCase ) # load original state_dict A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model'] # load 🤗 model A__ = YolosForObjectDetection(__UpperCamelCase ) model.eval() A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by YolosImageProcessor A__ = 800 if yolos_name != 'yolos_ti' else 512 A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase ) A__ = image_processor(images=prepare_img() , return_tensors='pt' ) A__ = model(**__UpperCamelCase ) A__ , A__ = outputs.logits, outputs.pred_boxes A__ , A__ = None, None if yolos_name == "yolos_ti": A__ = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) A__ = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": A__ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) A__ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": A__ = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) A__ = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": A__ = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) A__ = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": A__ = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) A__ = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if push_to_hub: A__ = { 'yolos_ti': 'yolos-tiny', 'yolos_s_200_pre': 'yolos-small', 'yolos_s_300_pre': 'yolos-small-300', 'yolos_s_dWr': 'yolos-small-dwr', 'yolos_base': 'yolos-base', } print('Pushing to the hub...' 
) A__ = model_mapping[yolos_name] image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' ) model.push_to_hub(__UpperCamelCase , organization='hustvl' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
52
0
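# The YOLOS conversion above splits each fused timm `qkv` projection into
# separate query/key/value tensors by slicing along the output dimension.
# A minimal sketch of that slicing with plain PyTorch (tensor names and the
# toy hidden size are illustrative, not from the checkpoint):
import torch

hidden_size = 8
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v]

q_w = qkv_weight[:hidden_size, :]
k_w = qkv_weight[hidden_size : 2 * hidden_size, :]
v_w = qkv_weight[-hidden_size:, :]

# Re-concatenating the slices must reproduce the fused matrix exactly.
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_weight)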
def A ( __UpperCamelCase ) -> str: A__ = 0 A__ = len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def A ( __UpperCamelCase ) -> Dict: if len(__UpperCamelCase ) <= 1: return arr, 0 A__ = len(__UpperCamelCase ) // 2 A__ = arr[0:mid] A__ = arr[mid:] A__ , A__ = count_inversions_recursive(__UpperCamelCase ) A__ , A__ = count_inversions_recursive(__UpperCamelCase ) A__ , A__ = _count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) A__ = inversion_p + inversions_q + cross_inversions return c, num_inversions def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: A__ = [] A__ = A__ = A__ = 0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def A ( ) -> List[str]: A__ = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) A__ = count_inversions_bf(__UpperCamelCase ) A__ , A__ = count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print('number of inversions = ' , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() A__ = count_inversions_bf(__UpperCamelCase ) A__ , A__ = count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print('number of inversions = ' , __UpperCamelCase ) # an empty list should also have zero inversions A__ = [] A__ = count_inversions_bf(__UpperCamelCase ) A__ , A__ = count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print('number of inversions = ' , __UpperCamelCase ) if __name__ == "__main__": main()
715
from typing import TYPE_CHECKING from ..utils import _LazyModule SCREAMING_SNAKE_CASE__ = { '''config''': [ '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''', '''OnnxConfig''', '''OnnxConfigWithPast''', '''OnnxSeq2SeqConfigWithPast''', '''PatchingSpec''', ], '''convert''': ['''export''', '''validate_model_outputs'''], '''features''': ['''FeaturesManager'''], '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
52
0
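# Sketch of the merge step that makes the divide-and-conquer inversion count
# in this row O(n log n): whenever an element of the sorted right half is
# emitted before the rest of the sorted left half, every remaining left
# element forms an inversion with it (names are illustrative).
def merge_count(left: list, right: list) -> tuple:
    merged, inversions = [], 0
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            inversions += len(left) - i  # left[i:] all exceed right[j]
            merged.append(right[j])
            j += 1
    merged += left[i:] + right[j:]
    return merged, inversions


# Cross inversions of [2, 10] vs [1, 5]: (2,1), (10,1), (10,5).
assert merge_count([2, 10], [1, 5]) == ([1, 2, 5, 10], 3)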
from random import randint from tempfile import TemporaryFile import numpy as np def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str: A__ = 0 if start < end: A__ = randint(__UpperCamelCase , __UpperCamelCase ) A__ = a[end] A__ = a[pivot] A__ = temp A__ , A__ = _in_place_partition(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) count += _in_place_quick_sort(__UpperCamelCase , __UpperCamelCase , p - 1 ) count += _in_place_quick_sort(__UpperCamelCase , p + 1 , __UpperCamelCase ) return count def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: A__ = 0 A__ = randint(__UpperCamelCase , __UpperCamelCase ) A__ = a[end] A__ = a[pivot] A__ = temp A__ = start - 1 for index in range(__UpperCamelCase , __UpperCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value A__ = new_pivot_index + 1 A__ = a[new_pivot_index] A__ = a[index] A__ = temp A__ = a[new_pivot_index + 1] A__ = a[end] A__ = temp return new_pivot_index + 1, count SCREAMING_SNAKE_CASE__ = TemporaryFile() SCREAMING_SNAKE_CASE__ = 1_0_0 # 1000 elements are to be sorted SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 0, 1 # mean and standard deviation SCREAMING_SNAKE_CASE__ = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array SCREAMING_SNAKE_CASE__ = np.load(outfile) SCREAMING_SNAKE_CASE__ = len(M) - 1 SCREAMING_SNAKE_CASE__ = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
716
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } SCREAMING_SNAKE_CASE__ = { '''google/rembert''': 2_5_6, } SCREAMING_SNAKE_CASE__ = '''▁''' class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Any = VOCAB_FILES_NAMES A__ : str = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : int = RemBertTokenizer def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ): """simple docstring""" A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , ) A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = False if not self.vocab_file else True def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ): """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_snake_case ): logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) ) return A__ = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ): copyfile(self.vocab_file , _snake_case ) return (out_vocab_file,)
52
0
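# The sample above is an in-place quicksort with a random pivot that also
# counts comparisons. A compact sketch of the same partition scheme (Lomuto
# partition, random pivot moved to the end), with illustrative names:
from __future__ import annotations

import random


def quicksort(a: list[int], lo: int = 0, hi: int | None = None) -> None:
    if hi is None:
        hi = len(a) - 1
    if lo >= hi:
        return
    pivot_index = random.randint(lo, hi)
    a[pivot_index], a[hi] = a[hi], a[pivot_index]  # move pivot to the end
    store = lo
    for k in range(lo, hi):
        if a[k] < a[hi]:  # smaller-than-pivot values drift left
            a[store], a[k] = a[k], a[store]
            store += 1
    a[store], a[hi] = a[hi], a[store]  # pivot lands in its final slot
    quicksort(a, lo, store - 1)
    quicksort(a, store + 1, hi)


data = [5, 3, 8, 1, 9, 2]
quicksort(data)
assert data == [1, 2, 3, 5, 8, 9]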
def A ( __UpperCamelCase = 600_851_475_143 ) -> int:
    try:
        n = int(__UpperCamelCase )
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans )


if __name__ == "__main__":
    print(f'{A() = }')
717
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random''' SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random''' @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Optional[int] ): """simple docstring""" return AutoConfig.from_pretrained(_snake_case ) def _a ( self : Optional[Any] ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def _a ( self : Optional[int] ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case ) def _a ( self : int ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def _a ( self : str ): """simple docstring""" A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def _a ( self : str ): """simple docstring""" with self.assertRaises(_snake_case ): create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=_snake_case , d=_snake_case )
52
0
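# Trial division as used in the Project Euler sample above: divide out each
# factor completely, so whatever remains above 1 at the end is itself the
# largest prime factor (the helper name is ours):
def largest_prime_factor(n: int) -> int:
    factor, largest = 2, 1
    while factor * factor <= n:
        while n % factor == 0:
            largest = factor
            n //= factor
        factor += 1
    return n if n > 1 else largest


# 600851475143 = 71 * 839 * 1471 * 6857
assert largest_prime_factor(600_851_475_143) == 6857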
import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : int = 0 A__ : bool = False A__ : float = 3.0 class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=_snake_case ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def _a ( self : Optional[int] ): """simple docstring""" A__ = GradScalerKwargs(init_scale=10_24 , growth_factor=2 ) AcceleratorState._reset_state() A__ = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) A__ = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 20_00 ) self.assertEqual(scaler._enabled , _snake_case ) @require_multi_gpu def _a ( self : Dict ): """simple docstring""" A__ = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(_snake_case , env=os.environ.copy() ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True) SCREAMING_SNAKE_CASE__ = Accelerator(kwargs_handlers=[ddp_scaler]) SCREAMING_SNAKE_CASE__ = torch.nn.Linear(1_0_0, 2_0_0) SCREAMING_SNAKE_CASE__ = accelerator.prepare(model) # Check the values changed in kwargs SCREAMING_SNAKE_CASE__ = '''''' SCREAMING_SNAKE_CASE__ = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4) if observed_bucket_cap_map != 1_5: error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
718
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Union[str, Any] = ["image_processor", "tokenizer"] A__ : Optional[Any] = "BridgeTowerImageProcessor" A__ : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] ): """simple docstring""" super().__init__(_snake_case , _snake_case ) def __call__( self : List[Any] , _snake_case : int , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[int] , ): """simple docstring""" A__ = self.tokenizer( text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , ) # add pixel_values + pixel_mask A__ = self.image_processor( _snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case ) encoding.update(_snake_case ) return encoding def _a ( self : Any , *_snake_case : Tuple , **_snake_case : List[Any] ): """simple docstring""" return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def _a ( self : Dict , *_snake_case : Dict , **_snake_case : List[str] ): """simple docstring""" return self.tokenizer.decode(*_snake_case , **_snake_case ) @property def _a ( self : Tuple ): """simple docstring""" A__ = self.tokenizer.model_input_names A__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
52
0
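# The kwargs-handler test above relies on `to_kwargs()` returning only the
# fields that differ from their defaults. A self-contained sketch of that
# pattern with a plain dataclass -- not accelerate's actual implementation;
# the class and method names here are illustrative:
from dataclasses import dataclass, fields


@dataclass
class Options:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self) -> dict:
        defaults = Options()  # compare against a fresh default instance
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(defaults, f.name)
        }


assert Options().to_kwargs() == {}
assert Options(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}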
'''simple docstring''' import argparse import os import re import packaging.version SCREAMING_SNAKE_CASE__ = '''examples/''' SCREAMING_SNAKE_CASE__ = { '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''), '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } SCREAMING_SNAKE_CASE__ = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } SCREAMING_SNAKE_CASE__ = '''README.md''' def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: with open(__UpperCamelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: A__ = f.read() A__ , A__ = REPLACE_PATTERNS[pattern] A__ = replace.replace('VERSION' , __UpperCamelCase ) A__ = re_pattern.sub(__UpperCamelCase , __UpperCamelCase ) with open(__UpperCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(__UpperCamelCase ) def A ( __UpperCamelCase ) -> Any: for folder, directories, fnames in os.walk(__UpperCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern='examples' ) def A ( __UpperCamelCase , __UpperCamelCase=False ) -> int: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if not patch: update_version_in_examples(__UpperCamelCase ) def A ( ) -> Dict: A__ = '🤗 Transformers currently provides the following architectures' A__ = '1. Want to contribute a new model?' with open(__UpperCamelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: A__ = f.readlines() # Find the start of the list. A__ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 A__ = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): A__ = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(__UpperCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(__UpperCamelCase ) def A ( ) -> Optional[Any]: with open(REPLACE_FILES['init'] , 'r' ) as f: A__ = f.read() A__ = REPLACE_PATTERNS['init'][0].search(__UpperCamelCase ).groups()[0] return packaging.version.parse(__UpperCamelCase ) def A ( __UpperCamelCase=False ) -> Tuple: A__ = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: A__ = default_version.base_version elif patch: A__ = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: A__ = f'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. A__ = input(f'''Which version are you releasing? 
[{default_version}]''' ) if len(__UpperCamelCase ) == 0: A__ = default_version print(f'''Updating version to {version}.''' ) global_version_update(__UpperCamelCase , patch=__UpperCamelCase ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def A ( ) -> Union[str, Any]: A__ = get_version() A__ = f'''{current_version.major}.{current_version.minor + 1}.0.dev0''' A__ = current_version.base_version # Check with the user we got that right. A__ = input(f'''Which version are we developing now? [{dev_version}]''' ) if len(__UpperCamelCase ) == 0: A__ = dev_version print(f'''Updating version to {version}.''' ) global_version_update(__UpperCamelCase ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') SCREAMING_SNAKE_CASE__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
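# A self-contained sketch of the substitution mechanics used by
# `update_version_in_file` above (the sample text is illustrative, not taken
# from the repository):
#
#   import re
#   pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
#   sample = '__version__ = "4.30.0.dev0"\n'
#   print(pattern.sub('__version__ = "4.30.0"', sample))
#   # -> __version__ = "4.30.0"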
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ = { '''configuration_xlm_roberta''': [ '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaConfig''', '''XLMRobertaOnnxConfig''', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaForCausalLM''', '''XLMRobertaForMaskedLM''', '''XLMRobertaForMultipleChoice''', '''XLMRobertaForQuestionAnswering''', '''XLMRobertaForSequenceClassification''', '''XLMRobertaForTokenClassification''', '''XLMRobertaModel''', '''XLMRobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMRobertaForCausalLM''', '''TFXLMRobertaForMaskedLM''', '''TFXLMRobertaForMultipleChoice''', '''TFXLMRobertaForQuestionAnswering''', '''TFXLMRobertaForSequenceClassification''', '''TFXLMRobertaForTokenClassification''', '''TFXLMRobertaModel''', '''TFXLMRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxXLMRobertaForMaskedLM''', '''FlaxXLMRobertaForCausalLM''', '''FlaxXLMRobertaForMultipleChoice''', '''FlaxXLMRobertaForQuestionAnswering''', '''FlaxXLMRobertaForSequenceClassification''', '''FlaxXLMRobertaForTokenClassification''', '''FlaxXLMRobertaModel''', '''FlaxXLMRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, 
TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
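# A short behavioral sketch of what the `_LazyModule` indirection above buys
# (assuming an installed `transformers`): importing the package only records
# `_import_structure`; the heavy torch/tf/flax submodules are imported on
# first attribute access.
#
#   from transformers.models import xlm_roberta
#   cfg_cls = xlm_roberta.XLMRobertaConfig   # real submodule import happens here, lazily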
def A ( __UpperCamelCase ) -> int:
    if not isinstance(__UpperCamelCase , int ) or __UpperCamelCase < 0:
        raise ValueError('Input must be a non-negative integer' )
    A__ = 0
    while __UpperCamelCase:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s, hence the loop won't run 32
        # times; it only runs once per `1` bit
        __UpperCamelCase &= __UpperCamelCase - 1
        A__ += 1
    return A__


if __name__ == "__main__":
    import doctest

    doctest.testmod()
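# Quick sanity check for the set-bit counter above, added here as an
# illustrative check (pure stdlib): Kernighan's trick must agree with counting
# '1' characters in the binary string.
for _n in (0, 1, 25, 2**31 - 1):
    assert A(_n) == bin(_n).count('1'), _n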
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def A ( __UpperCamelCase ) -> Tuple: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [] if args.gold_data_mode == "qa": A__ = pd.read_csv(__UpperCamelCase , sep='\t' , header=__UpperCamelCase ) for answer_list in data[1]: A__ = ast.literal_eval(__UpperCamelCase ) answers.append(__UpperCamelCase ) else: A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [[reference] for reference in references] A__ = A__ = A__ = 0 for prediction, ground_truths in zip(__UpperCamelCase , __UpperCamelCase ): total += 1 em += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) fa += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A__ = 100.0 * em / total A__ = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: A__ = args.k A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()] A__ = A__ = 0 for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ): A__ = set(hypo.split('\t' )[:k] ) A__ = set(reference.split('\t' ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k A__ = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: def strip_title(__UpperCamelCase ): if title.startswith('"' ): A__ = title[1:] if title.endswith('"' ): A__ = title[:-1] return title A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase , )['input_ids'].to(args.device ) A__ = rag_model.rag.question_encoder(__UpperCamelCase ) A__ = question_enc_outputs[0] A__ = rag_model.retriever( __UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , ) A__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) A__ = [] for docs in all_docs: A__ = [strip_title(__UpperCamelCase ) for title in docs['title']] provenance_strings.append('\t'.join(__UpperCamelCase ) ) return provenance_strings def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: with torch.no_grad(): A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __UpperCamelCase , 
return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase ) A__ = inputs_dict.input_ids.to(args.device ) A__ = inputs_dict.attention_mask.to(args.device ) A__ = rag_model.generate( # rag_model overwrites generate __UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) A__ = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase ) if args.print_predictions: for q, a in zip(__UpperCamelCase , __UpperCamelCase ): logger.info('Q: {} - A: {}'.format(__UpperCamelCase , __UpperCamelCase ) ) return answers def A ( ) -> Any: A__ = argparse.ArgumentParser() parser.add_argument( '--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=__UpperCamelCase , help=( 'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the' ' model_name_or_path' ) , ) parser.add_argument( '--index_name' , default=__UpperCamelCase , choices=['exact', 'compressed', 'legacy'] , type=__UpperCamelCase , help='RAG model retriever type' , ) parser.add_argument( '--index_path' , default=__UpperCamelCase , type=__UpperCamelCase , help='Path to the retrieval index' , ) parser.add_argument('--n_docs' , default=5 , type=__UpperCamelCase , help='Number of retrieved docs' ) parser.add_argument( '--model_name_or_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , ) parser.add_argument( '--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=__UpperCamelCase , help=( 'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates' ' precision@k.' ) , ) parser.add_argument('--k' , default=1 , type=__UpperCamelCase , help='k for the precision@k calculation' ) parser.add_argument( '--evaluation_set' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a file containing evaluation samples' , ) parser.add_argument( '--gold_data_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a tab-separated file with gold samples' , ) parser.add_argument( '--gold_data_mode' , default='qa' , type=__UpperCamelCase , choices=['qa', 'ans'] , help=( 'Format of the gold data file' 'qa - a single line in the following format: question [tab] answer_list' 'ans - a single line of the gold file contains the expected answer string' ) , ) parser.add_argument( '--predictions_path' , type=__UpperCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , ) parser.add_argument( '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , ) parser.add_argument( '--eval_batch_size' , default=8 , type=__UpperCamelCase , help='Batch size per GPU/CPU for evaluation.' 
, ) parser.add_argument( '--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , ) parser.add_argument( '--num_beams' , default=4 , type=__UpperCamelCase , help='Number of beams to be used when generating answers' , ) parser.add_argument('--min_length' , default=1 , type=__UpperCamelCase , help='Min length of the generated answers' ) parser.add_argument('--max_length' , default=50 , type=__UpperCamelCase , help='Max length of the generated answers' ) parser.add_argument( '--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , ) parser.add_argument( '--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , ) A__ = parser.parse_args() A__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) return args def A ( __UpperCamelCase ) -> int: A__ = {} if args.model_type is None: A__ = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith('rag' ): A__ = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration A__ = args.n_docs if args.index_name is not None: A__ = args.index_name if args.index_path is not None: A__ = args.index_path else: A__ = BartForConditionalGeneration A__ = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('Evaluate the following checkpoints: %s' , __UpperCamelCase ) A__ = get_scores if args.eval_mode == 'e2e' else get_precision_at_k A__ = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) ) score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path ) continue logger.info('***** Running evaluation for {} *****'.format(__UpperCamelCase ) ) logger.info(' Batch size = %d' , args.eval_batch_size ) logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) ) if args.model_type.startswith('rag' ): A__ = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) A__ = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase ) model.retriever.init_retrieval() else: A__ = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) model.to(args.device ) with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file: A__ = [] for line in tqdm(__UpperCamelCase ): questions.append(line.strip() ) if len(__UpperCamelCase ) == args.eval_batch_size: A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) preds_file.write('\n'.join(__UpperCamelCase ) + '\n' ) preds_file.flush() A__ = [] if len(__UpperCamelCase ) > 0: A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) preds_file.write('\n'.join(__UpperCamelCase ) ) preds_file.flush() score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = get_args() main(args)
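# A hedged invocation sketch for the evaluation script above. The model
# identifier and file paths are placeholders, not values from the original:
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path predictions.txt \
#       --eval_mode e2e \
#       --n_docs 5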
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def __get__( self : str , _snake_case : List[str] , _snake_case : List[Any]=None ): """simple docstring""" if obj is None: return self if self.fget is None: raise AttributeError('unreadable attribute' ) A__ = '__cached_' + self.fget.__name__ A__ = getattr(_snake_case , _snake_case , _snake_case ) if cached is None: A__ = self.fget(_snake_case ) setattr(_snake_case , _snake_case , _snake_case ) return cached def A ( __UpperCamelCase ) -> Optional[Any]: A__ = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f'''invalid truth value {val!r}''' ) def A ( __UpperCamelCase ) -> Any: if is_torch_fx_proxy(__UpperCamelCase ): return True if is_torch_available(): import torch if isinstance(__UpperCamelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(__UpperCamelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(__UpperCamelCase , (jnp.ndarray, Tracer) ): return True return isinstance(__UpperCamelCase , np.ndarray ) def A ( __UpperCamelCase ) -> Union[str, Any]: return isinstance(__UpperCamelCase , np.ndarray ) def A ( __UpperCamelCase ) -> List[Any]: return _is_numpy(__UpperCamelCase ) def A ( __UpperCamelCase ) -> str: import torch return isinstance(__UpperCamelCase , torch.Tensor ) def A ( __UpperCamelCase ) -> Any: return False if not is_torch_available() else _is_torch(__UpperCamelCase ) def A ( __UpperCamelCase ) -> str: import torch return isinstance(__UpperCamelCase , torch.device ) def A ( __UpperCamelCase ) -> List[Any]: return False if not is_torch_available() else _is_torch_device(__UpperCamelCase ) def A ( __UpperCamelCase ) -> List[Any]: import torch if isinstance(__UpperCamelCase , __UpperCamelCase ): if hasattr(__UpperCamelCase , __UpperCamelCase ): A__ = getattr(__UpperCamelCase , __UpperCamelCase ) else: return False return isinstance(__UpperCamelCase , torch.dtype ) def A ( __UpperCamelCase ) -> List[Any]: return False if not is_torch_available() else _is_torch_dtype(__UpperCamelCase ) def A ( __UpperCamelCase ) -> Any: import tensorflow as tf return isinstance(__UpperCamelCase , tf.Tensor ) def A ( __UpperCamelCase ) -> List[Any]: return False if not is_tf_available() else _is_tensorflow(__UpperCamelCase ) def A ( __UpperCamelCase ) -> Any: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(__UpperCamelCase , 'is_symbolic_tensor' ): return tf.is_symbolic_tensor(__UpperCamelCase ) return type(__UpperCamelCase ) == tf.Tensor def A ( __UpperCamelCase ) -> Any: return False if not is_tf_available() else _is_tf_symbolic_tensor(__UpperCamelCase ) def A ( __UpperCamelCase ) -> Optional[Any]: import jax.numpy as jnp # noqa: F811 return isinstance(__UpperCamelCase , jnp.ndarray ) def A ( __UpperCamelCase ) -> str: return False if not is_flax_available() else _is_jax(__UpperCamelCase ) def A ( __UpperCamelCase ) -> Any: if 
isinstance(__UpperCamelCase , (dict, UserDict) ): return {k: to_py_obj(__UpperCamelCase ) for k, v in obj.items()} elif isinstance(__UpperCamelCase , (list, tuple) ): return [to_py_obj(__UpperCamelCase ) for o in obj] elif is_tf_tensor(__UpperCamelCase ): return obj.numpy().tolist() elif is_torch_tensor(__UpperCamelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(__UpperCamelCase ): return np.asarray(__UpperCamelCase ).tolist() elif isinstance(__UpperCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def A ( __UpperCamelCase ) -> Dict: if isinstance(__UpperCamelCase , (dict, UserDict) ): return {k: to_numpy(__UpperCamelCase ) for k, v in obj.items()} elif isinstance(__UpperCamelCase , (list, tuple) ): return np.array(__UpperCamelCase ) elif is_tf_tensor(__UpperCamelCase ): return obj.numpy() elif is_torch_tensor(__UpperCamelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(__UpperCamelCase ): return np.asarray(__UpperCamelCase ) else: return obj class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" def _a ( self : Tuple ): """simple docstring""" A__ = fields(self ) # Safety and consistency checks if not len(_snake_case ): raise ValueError(F'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' ) A__ = getattr(self , class_fields[0].name ) A__ = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(_snake_case ): if isinstance(_snake_case , _snake_case ): A__ = first_field.items() A__ = True else: try: A__ = iter(_snake_case ) A__ = True except TypeError: A__ = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(_snake_case ): if ( not isinstance(_snake_case , (list, tuple) ) or not len(_snake_case ) == 2 or not isinstance(element[0] , _snake_case ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute A__ = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' ) break setattr(self , element[0] , element[1] ) if element[1] is not None: A__ = element[1] elif first_field is not None: A__ = first_field else: for field in class_fields: A__ = getattr(self , field.name ) if v is not None: A__ = v def __delitem__( self : List[Any] , *_snake_case : str , **_snake_case : Optional[Any] ): """simple docstring""" raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def _a ( self : Any , *_snake_case : List[str] , **_snake_case : List[str] ): """simple docstring""" raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def _a ( self : Optional[int] , *_snake_case : List[Any] , **_snake_case : str ): """simple docstring""" raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def _a ( self : Union[str, Any] , *_snake_case : Tuple , **_snake_case : Optional[int] ): """simple docstring""" raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self : Optional[int] , _snake_case : List[Any] ): """simple docstring""" if isinstance(_snake_case , _snake_case ): A__ = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : Union[str, Any] , _snake_case : Tuple , _snake_case : int ): """simple docstring""" if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(_snake_case , _snake_case ) super().__setattr__(_snake_case , _snake_case ) def __setitem__( self : List[str] , _snake_case : str , _snake_case : Any ): """simple docstring""" super().__setitem__(_snake_case , _snake_case ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(_snake_case , _snake_case ) def _a ( self : Union[str, Any] ): """simple docstring""" return tuple(self[k] for k in self.keys() ) class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ): """simple docstring""" @classmethod def _a ( cls : Any , _snake_case : Optional[int] ): """simple docstring""" raise ValueError( F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : Optional[Any] = "longest" A__ : str = "max_length" A__ : Dict = "do_not_pad" class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : int = "pt" A__ : List[str] = "tf" A__ : List[Any] = "np" A__ : Any = "jax" class __lowerCAmelCase : """simple docstring""" def __init__( self : Optional[int] , _snake_case : List[ContextManager] ): """simple docstring""" A__ = context_managers A__ = ExitStack() def __enter__( self : Tuple ): """simple docstring""" for context_manager in self.context_managers: self.stack.enter_context(_snake_case ) def __exit__( self : List[Any] , *_snake_case : Tuple , **_snake_case : str ): """simple docstring""" self.stack.__exit__(*_snake_case , **_snake_case ) def A ( __UpperCamelCase ) -> List[Any]: A__ = infer_framework(__UpperCamelCase ) if framework == "tf": A__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": A__ = inspect.signature(model_class.forward ) # PyTorch models else: A__ = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def A ( __UpperCamelCase ) -> Dict: A__ = model_class.__name__ A__ = 
infer_framework(__UpperCamelCase ) if framework == "tf": A__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": A__ = inspect.signature(model_class.forward ) # PyTorch models else: A__ = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def A ( __UpperCamelCase , __UpperCamelCase = "" , __UpperCamelCase = "." ) -> Any: def _flatten_dict(__UpperCamelCase , __UpperCamelCase="" , __UpperCamelCase="." ): for k, v in d.items(): A__ = str(__UpperCamelCase ) + delimiter + str(__UpperCamelCase ) if parent_key else k if v and isinstance(__UpperCamelCase , __UpperCamelCase ): yield from flatten_dict(__UpperCamelCase , __UpperCamelCase , delimiter=__UpperCamelCase ).items() else: yield key, v return dict(_flatten_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ) @contextmanager def A ( __UpperCamelCase , __UpperCamelCase = False ) -> int: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def A ( __UpperCamelCase , __UpperCamelCase=None ) -> List[Any]: if is_numpy_array(__UpperCamelCase ): return np.transpose(__UpperCamelCase , axes=__UpperCamelCase ) elif is_torch_tensor(__UpperCamelCase ): return array.T if axes is None else array.permute(*__UpperCamelCase ) elif is_tf_tensor(__UpperCamelCase ): import tensorflow as tf return tf.transpose(__UpperCamelCase , perm=__UpperCamelCase ) elif is_jax_tensor(__UpperCamelCase ): return jnp.transpose(__UpperCamelCase , axes=__UpperCamelCase ) else: raise ValueError(f'''Type not supported for transpose: {type(__UpperCamelCase )}.''' ) def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]: if is_numpy_array(__UpperCamelCase ): return np.reshape(__UpperCamelCase , __UpperCamelCase ) elif is_torch_tensor(__UpperCamelCase ): return array.reshape(*__UpperCamelCase ) elif is_tf_tensor(__UpperCamelCase ): import tensorflow as tf return tf.reshape(__UpperCamelCase , __UpperCamelCase ) elif is_jax_tensor(__UpperCamelCase ): return jnp.reshape(__UpperCamelCase , __UpperCamelCase ) else: raise ValueError(f'''Type not supported for reshape: {type(__UpperCamelCase )}.''' ) def A ( __UpperCamelCase , __UpperCamelCase=None ) -> Tuple: if is_numpy_array(__UpperCamelCase ): return np.squeeze(__UpperCamelCase , axis=__UpperCamelCase ) elif is_torch_tensor(__UpperCamelCase ): return array.squeeze() if axis is None else array.squeeze(dim=__UpperCamelCase ) elif is_tf_tensor(__UpperCamelCase ): import tensorflow as tf return tf.squeeze(__UpperCamelCase , axis=__UpperCamelCase ) elif is_jax_tensor(__UpperCamelCase ): return jnp.squeeze(__UpperCamelCase , axis=__UpperCamelCase ) else: raise ValueError(f'''Type not supported for squeeze: {type(__UpperCamelCase )}.''' ) def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]: if is_numpy_array(__UpperCamelCase ): return np.expand_dims(__UpperCamelCase , __UpperCamelCase ) elif is_torch_tensor(__UpperCamelCase ): return array.unsqueeze(dim=__UpperCamelCase ) elif is_tf_tensor(__UpperCamelCase ): import tensorflow as tf return tf.expand_dims(__UpperCamelCase , axis=__UpperCamelCase ) elif is_jax_tensor(__UpperCamelCase ): return jnp.expand_dims(__UpperCamelCase , axis=__UpperCamelCase ) else: raise ValueError(f'''Type not supported for expand_dims: {type(__UpperCamelCase )}.''' ) def A ( __UpperCamelCase ) -> Optional[Any]: if 
is_numpy_array(__UpperCamelCase ): return np.size(__UpperCamelCase ) elif is_torch_tensor(__UpperCamelCase ): return array.numel() elif is_tf_tensor(__UpperCamelCase ): import tensorflow as tf return tf.size(__UpperCamelCase ) elif is_jax_tensor(__UpperCamelCase ): return array.size else: raise ValueError(f'''Type not supported for expand_dims: {type(__UpperCamelCase )}.''' ) def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple: for key, value in auto_map.items(): if isinstance(__UpperCamelCase , (tuple, list) ): A__ = [f'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value] elif value is not None and "--" not in value: A__ = f'''{repo_id}--{value}''' return auto_map def A ( __UpperCamelCase ) -> Optional[int]: for base_class in inspect.getmro(__UpperCamelCase ): A__ = base_class.__module__ A__ = base_class.__name__ if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('torch' ) or name == "PreTrainedModel": return "pt" elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f'''Could not infer framework from class {model_class}.''' )
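# A minimal standalone sketch of the dispatch pattern used by the tensor
# helpers above, covering only the numpy branch (names here are illustrative,
# not the file's; the real helpers also dispatch to torch/tf/jax):
def _transpose_any(array, axes=None):
    # numpy branch only; unsupported types raise, mirroring the helpers above
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    raise ValueError(f'Type not supported for transpose: {type(array)}.')


assert _transpose_any(np.arange(6).reshape(2, 3)).shape == (3, 2)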
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope A__ = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size A__ = (self.image_size // 32) ** 2 A__ = num_patches + 1 def _a ( self : Any ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def _a ( self : Tuple ): """simple docstring""" A__ = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [4, 8, 16, 32], 'num_groups': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , ) def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ): """simple docstring""" A__ = ViTHybridModel(config=_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ): """simple docstring""" A__ = self.type_sequence_label_size A__ = ViTHybridForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() A__ = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Dict ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () A__ : str = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) A__ : Union[str, Any] = False A__ : Any = False A__ : Union[str, Any] = False def _a ( self : Dict ): """simple docstring""" A__ = ViTHybridModelTester(self ) A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 ) def _a ( self : int ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def _a ( self : int ): """simple docstring""" pass def _a ( self : int ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) ) def _a ( self : List[str] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(_snake_case ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _snake_case ) def _a ( self : Any ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def _a ( self : str ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) def _a ( self : Any ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: A__ = model_class(config=_snake_case ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _a ( self : int ): """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ViTHybridModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def A ( ) 
-> Union[str, Any]: A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Tuple ): """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Optional[Any] ): """simple docstring""" A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( _snake_case ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case ) # forward pass with torch.no_grad(): A__ = model(**_snake_case ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _snake_case ) A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) ) @slow @require_accelerate def _a ( self : List[Any] ): """simple docstring""" A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' ) A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' ) A__ = prepare_img() A__ = image_processor(images=_snake_case , return_tensors='pt' ) A__ = model(**_snake_case ) A__ = outputs.logits # model predicts one of the 1000 ImageNet classes A__ = logits.argmax(-1 ).item() self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
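# A hedged inference sketch mirroring the integration test above (checkpoint
# name taken from the test; the local image path is an assumption):
#
#   from PIL import Image
#   import torch
#   from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor
#
#   processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
#   model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384')
#   inputs = processor(images=Image.open('cat.jpg'), return_tensors='pt')
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])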
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class __UpperCamelCase ( unittest.TestCase ): def _a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase = ["""a""", """b""", """c"""] # Defaults to last layer if both are None __lowercase , __lowercase = get_aligned_output_features_output_indices(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , ["""c"""] ) self.assertEqual(_lowerCAmelCase , [2] ) # Out indices set to match out features __lowercase , __lowercase = get_aligned_output_features_output_indices(["""a""", """c"""] , _lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(_lowerCAmelCase , [0, 2] ) # Out features set to match out indices __lowercase , __lowercase = get_aligned_output_features_output_indices(_lowerCAmelCase , [0, 2] , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(_lowerCAmelCase , [0, 2] ) # Out features selected from negative indices __lowercase , __lowercase = get_aligned_output_features_output_indices(_lowerCAmelCase , [-3, -1] , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(_lowerCAmelCase , [-3, -1] ) def _a ( self : str ) -> int: """simple docstring""" with self.assertRaises(_lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , _lowerCAmelCase ) # Out features must be a list with self.assertRaises(_lowerCAmelCase ): verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] ) # Out features must be a subset of stage names with self.assertRaises(_lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] ) # Out indices must be a list or tuple with self.assertRaises(_lowerCAmelCase ): verify_out_features_out_indices(_lowerCAmelCase , 0 , ["""a""", """b"""] ) # Out indices must be a subset of stage names with self.assertRaises(_lowerCAmelCase ): verify_out_features_out_indices(_lowerCAmelCase , (0, 1) , ["""a"""] ) # Out features and out indices must be the same length with self.assertRaises(_lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] ) # Out features should match out indices with self.assertRaises(_lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] ) # Out features and out indices should be in order with self.assertRaises(_lowerCAmelCase ): verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] ) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] ) def _a ( self : Tuple ) -> List[Any]: """simple docstring""" __lowercase = BackboneMixin() __lowercase = ["""a""", """b""", """c"""] __lowercase = ["""a""", """c"""] __lowercase = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly __lowercase = ["""a""", """b"""] self.assertEqual(backbone.out_features , ["""a""", """b"""] ) self.assertEqual(backbone.out_indices , [0, 1] ) __lowercase = [-3, -1] self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [-3, -1] )
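# A standalone sketch of the alignment rule the test above exercises
# (an illustrative reimplementation, not the library function itself):
def _align(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        # default: the last stage only
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        # derive features from indices (negative indices are kept as given)
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        # derive indices from features
        return list(out_features), [stage_names.index(f) for f in out_features]
    return list(out_features), list(out_indices)


assert _align(None, None, ['a', 'b', 'c']) == (['c'], [2])
assert _align(None, [-3, -1], ['a', 'b', 'c']) == (['a', 'c'], [-3, -1])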
import heapq import sys import numpy as np __UpperCamelCase : List[str] = tuple[int, int] class __UpperCamelCase : def __init__( self : Optional[int] ) -> Dict: """simple docstring""" __lowercase = [] __lowercase = set() def _a ( self : int ) -> List[Any]: """simple docstring""" if not self.empty(): return self.elements[0][0] else: return float("""inf""" ) def _a ( self : Dict ) -> Optional[Any]: """simple docstring""" return len(self.elements ) == 0 def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(_lowerCAmelCase ) else: # update # print("update", item) __lowercase = [] ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" if item in self.set: self.set.remove(_lowerCAmelCase ) __lowercase = [] ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def _a ( self : Any ) -> List[Any]: """simple docstring""" return self.elements[0][1] def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements ) self.set.remove(_lowerCAmelCase ) return (priority, item) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = np.array(lowerCamelCase ) __lowercase = np.array(lowerCamelCase ) return np.linalg.norm(a - b ) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase ) return ans def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = np.chararray((n, n) ) for i in range(lowerCamelCase ): for j in range(lowerCamelCase ): __lowercase = """*""" for i in range(lowerCamelCase ): for j in range(lowerCamelCase ): if (j, (n - 1) - i) in blocks: __lowercase = """#""" __lowercase = """-""" __lowercase = back_pointer[goal] while x != start: ((__lowercase) , (__lowercase)) = x # print(x) __lowercase = """-""" __lowercase = back_pointer[x] __lowercase = """-""" for i in range(lowerCamelCase ): for j in range(lowerCamelCase ): if (i, j) == (0, n - 1): print(grid[i][j] , end=""" """ ) print("""<-- End position""" , end=""" """ ) else: print(grid[i][j] , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) print("""PATH TAKEN BY THE ALGORITHM IS:-""" ) __lowercase = back_pointer[goal] while x != start: print(lowerCamelCase , end=""" """ ) __lowercase = back_pointer[x] print(lowerCamelCase ) sys.exit() def snake_case ( lowerCamelCase ): '''simple docstring''' if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: 
return False return True def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ): '''simple docstring''' for itera in range(lowerCamelCase ): open_list[itera].remove_element(lowerCamelCase ) # print("s", s) # print("j", j) ((__lowercase) , (__lowercase)) = s __lowercase = (x - 1, y) __lowercase = (x + 1, y) __lowercase = (x, y + 1) __lowercase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(lowerCamelCase ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(lowerCamelCase ) __lowercase = -1 __lowercase = float("""inf""" ) if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1: __lowercase = g_function[s] + 1 __lowercase = s if neighbours not in close_list_anchor: open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) ) if neighbours not in close_list_inad: for var in range(1 , lowerCamelCase ): if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key( lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ): open_list[j].put( lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ) def snake_case ( ): '''simple docstring''' __lowercase = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list __UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __UpperCamelCase : Optional[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __UpperCamelCase : Optional[Any] = make_common_ground() __UpperCamelCase : Dict = blocks_blk # hyper parameters __UpperCamelCase : Union[str, Any] = 1 __UpperCamelCase : Union[str, Any] = 1 __UpperCamelCase : Optional[int] = 20 __UpperCamelCase : List[str] = 3 # one consistent and two other inconsistent # start and end destination __UpperCamelCase : str = (0, 0) __UpperCamelCase : str = (n - 1, n - 1) __UpperCamelCase : Optional[Any] = 1 def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = {start: 0, goal: float("""inf""" )} __lowercase = {start: -1, goal: -1} __lowercase = [] __lowercase = set() for i in range(lowerCamelCase ): open_list.append(PriorityQueue() ) open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ) __lowercase = [] __lowercase = [] while open_list[0].minkey() < float("""inf""" ): for i in range(1 , lowerCamelCase ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float("""inf""" ): do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase ) else: __lowercase , __lowercase = open_list[i].top_show() visited.add(lowerCamelCase ) expand_state( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) close_list_inad.append(lowerCamelCase ) else: if 
g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("""inf""" ): do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase ) else: __lowercase = open_list[0].top_show() visited.add(lowerCamelCase ) expand_state( lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) close_list_anchor.append(lowerCamelCase ) print("""No path found to goal""" ) print() for i in range(n - 1 , -1 , -1 ): for j in range(lowerCamelCase ): if (j, i) in blocks: print("""#""" , end=""" """ ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print("""*""" , end=""" """ ) else: print("""-""" , end=""" """ ) else: print("""*""" , end=""" """ ) if (j, i) == (n - 1, n - 1): print("""<-- End position""" , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
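# A worked example of the priority key used throughout this search:
# key(s, i) = g[s] + Wa * h_i(s, goal). With Wa = 1, g[start] = 0 and the
# Euclidean (consistent) heuristic, the start node's key toward a goal at
# (3, 4) would be sqrt(3**2 + 4**2) = 5.0, i.e. the straight-line distance.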
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


__UpperCamelCase : int = logging.get_logger(__name__)


class __UpperCamelCase ( _lowerCAmelCase ):
    def __init__( self : Any , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Union[str, Any] ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use FlavaImageProcessor instead.""" , FutureWarning , )
        super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
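# A minimal, self-contained sketch of the deprecation-shim pattern used above
# (pure stdlib; the class names here are illustrative):
import warnings as _warnings


class _NewThing:
    pass


class _OldThing(_NewThing):
    def __init__(self, *args, **kwargs):
        # the old name still works, but warns callers to migrate
        _warnings.warn('_OldThing is deprecated; use _NewThing instead.', FutureWarning)
        super().__init__(*args, **kwargs)


with _warnings.catch_warnings(record=True) as _caught:
    _warnings.simplefilter('always')
    _OldThing()
    assert _caught[0].category is FutureWarning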
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : Optional[int] = logging.get_logger(__name__) def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) __lowercase = MaskFormerConfig(backbone_config=lowerCamelCase ) __lowercase = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok __lowercase = 847 __lowercase = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok __lowercase = 150 __lowercase = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok __lowercase = 171 __lowercase = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO __lowercase = 133 __lowercase = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok __lowercase = 19 __lowercase = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok __lowercase = 65 __lowercase = """mapillary-vistas-id2label.json""" __lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) ) __lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()} return config def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', 
F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') ) rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') ) 
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') ) # cross-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') ) # MLP 1 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') ) # MLP 2 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') ) # layernorm 3 (final layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') ) rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') ) # fmt: on return rename_keys def snake_case ( lowerCamelCase , lowerCamelCase , 
lowerCamelCase ): '''simple docstring''' __lowercase = dct.pop(lowerCamelCase ) __lowercase = val def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __lowercase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' ) __lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[:dim, :] __lowercase = in_proj_bias[: dim] __lowercase = in_proj_weight[ dim : dim * 2, : ] __lowercase = in_proj_bias[ dim : dim * 2 ] __lowercase = in_proj_weight[ -dim :, : ] __lowercase = in_proj_bias[-dim :] # fmt: on def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' ) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[: hidden_size, :] __lowercase = in_proj_bias[:config.hidden_size] __lowercase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowercase = in_proj_bias[hidden_size : hidden_size * 2] __lowercase = in_proj_weight[-hidden_size :, :] __lowercase = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' ) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[: hidden_size, :] __lowercase = in_proj_bias[:config.hidden_size] __lowercase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowercase = in_proj_bias[hidden_size : hidden_size * 2] __lowercase = in_proj_weight[-hidden_size :, :] __lowercase = in_proj_bias[-hidden_size :] # fmt: on def snake_case ( ): '''simple docstring''' __lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return im @torch.no_grad() def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ): '''simple docstring''' __lowercase = get_maskformer_config(lowerCamelCase ) # load original state_dict with open(lowerCamelCase , """rb""" ) as f: __lowercase = pickle.load(lowerCamelCase ) __lowercase = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys __lowercase = create_rename_keys(lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase ) read_in_swin_q_k_v(lowerCamelCase , config.backbone_config ) read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase 
) # update to torch tensors for key, value in state_dict.items(): __lowercase = torch.from_numpy(lowerCamelCase ) # load 🤗 model __lowercase = MaskFormerForInstanceSegmentation(lowerCamelCase ) model.eval() for name, param in model.named_parameters(): print(lowerCamelCase , param.shape ) __lowercase , __lowercase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}' # verify results __lowercase = prepare_img() if "vistas" in model_name: __lowercase = 65 elif "cityscapes" in model_name: __lowercase = 65_535 else: __lowercase = 255 __lowercase = True if """ade""" in model_name else False __lowercase = MaskFormerImageProcessor(ignore_index=lowerCamelCase , reduce_labels=lowerCamelCase ) __lowercase = image_processor(lowerCamelCase , return_tensors="""pt""" ) __lowercase = model(**lowerCamelCase ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": __lowercase = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'Saving model and image processor to {pytorch_dump_folder_path}' ) Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) image_processor.save_pretrained(lowerCamelCase ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(F'nielsr/{model_name}' ) image_processor.push_to_hub(F'nielsr/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCamelCase : List[Any] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
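# A minimal usage sketch for the conversion entry point above (the script
# filename and local paths here are hypothetical, not part of the original):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade \
#       --push_to_hub
#
# Note that although the argument's help text mentions a .pth file, the code
# above loads the checkpoint with pickle.load, so a Detectron2-style .pkl
# dump is what is actually expected.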
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : str = logging.get_logger(__name__) def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = """huggingface/label-files""" __lowercase = """imagenet-1k-id2label.json""" __lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) ) __lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()} __lowercase = {v: k for k, v in idalabel.items()} __lowercase = """std_conv""" if """bit""" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" __lowercase = BitConfig( conv_layer=lowerCamelCase , num_labels=1_000 , idalabel=lowerCamelCase , labelaid=lowerCamelCase , ) return config def snake_case ( lowerCamelCase ): '''simple docstring''' if "stem.conv" in name: __lowercase = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: __lowercase = name.replace("""blocks""" , """layers""" ) if "head.fc" in name: __lowercase = name.replace("""head.fc""" , """classifier.1""" ) if name.startswith("""norm""" ): __lowercase = """bit.""" + name if "bit" not in name and "classifier" not in name: __lowercase = """bit.encoder.""" + name return name def snake_case ( ): '''simple docstring''' __lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return im @torch.no_grad() def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase=False ): '''simple docstring''' __lowercase = get_config(lowerCamelCase ) # load original model from timm __lowercase = create_model(lowerCamelCase , pretrained=lowerCamelCase ) timm_model.eval() # load state_dict of original model __lowercase = timm_model.state_dict() for key in state_dict.copy().keys(): __lowercase = state_dict.pop(lowerCamelCase ) __lowercase = val.squeeze() if """head""" in key else val # load HuggingFace model __lowercase = BitForImageClassification(lowerCamelCase ) model.eval() model.load_state_dict(lowerCamelCase ) # create image processor __lowercase = create_transform(**resolve_data_config({} , model=lowerCamelCase ) ) __lowercase = transform.transforms __lowercase = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } __lowercase = BitImageProcessor( do_resize=lowerCamelCase , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCamelCase , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) __lowercase = prepare_img() __lowercase = transform(lowerCamelCase ).unsqueeze(0 ) __lowercase = processor(lowerCamelCase , return_tensors="""pt""" ).pixel_values # 
verify pixel values assert torch.allclose(lowerCamelCase , lowerCamelCase ) # verify logits with torch.no_grad(): __lowercase = model(lowerCamelCase ) __lowercase = outputs.logits print("""Logits:""" , logits[0, :3] ) print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] ) __lowercase = timm_model(lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowerCamelCase , outputs.logits , atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) print(F'Saving model {model_name} and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase ) processor.save_pretrained(lowerCamelCase ) if push_to_hub: print(F'Pushing model {model_name} and processor to the hub' ) model.push_to_hub(F'ybelkada/{model_name}' ) processor.push_to_hub(F'ybelkada/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""resnetv2_50x1_bitm""", type=str, help="""Name of the BiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub.""", ) __UpperCamelCase : str = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
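# A minimal usage sketch for the BiT conversion script above (the script
# filename and output path are hypothetical):
#
#   python convert_bit_checkpoint.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./resnetv2_50x1_bitm \
#       --push_to_hub
#
# The model name must be one accepted by timm.create_model, since the script
# instantiates the original timm model to verify pixel values and logits.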
from math import sqrt


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(lowerCamelCase ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def snake_case ( lowerCamelCase = 10_001 ):
    '''simple docstring'''
    __lowercase = 0
    __lowercase = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(lowerCamelCase ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(lowerCamelCase ):
            count += 1
    return number


if __name__ == "__main__":
    print(F'''{solution() = }''')
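# Quick sanity check for the functions above, verifiable by hand: the first
# six primes are 2, 3, 5, 7, 11 and 13, and the 10_001st prime (the default
# nth) is the known answer to Project Euler problem 7, 104_743.
#
#   >>> solution(6)
#   13
#   >>> solution()
#   104743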
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Dict = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""} __UpperCamelCase : str = { """vocab_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""", }, """emoji_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""", }, } __UpperCamelCase : Dict = { """abeja/gpt-neox-japanese-2.7b""": 2048, } def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' with open(lowerCamelCase , """r""" , encoding="""utf-8""" ) as f: __lowercase = json.loads(f.read() ) __lowercase = collections.OrderedDict() __lowercase = collections.OrderedDict() __lowercase = collections.OrderedDict() with open(lowerCamelCase , """r""" , encoding="""utf-8""" ) as f: __lowercase = f.readlines() __lowercase = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token] for idx, b in enumerate(lowerCamelCase ): __lowercase = b __lowercase = idx for wd in b: __lowercase = idx return vocab, raw_vocab, ids_to_tokens, emoji class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :Optional[int] = VOCAB_FILES_NAMES __snake_case :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __snake_case :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case :Union[str, Any] = ['input_ids', 'attention_mask'] def __init__( self : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict="<|endoftext|>" , _lowerCAmelCase : Union[str, Any]="<|endoftext|>" , _lowerCAmelCase : Union[str, Any]="<|startoftext|>" , _lowerCAmelCase : Any="<|endoftext|>" , _lowerCAmelCase : Union[str, Any]=False , **_lowerCAmelCase : Dict , ) -> int: """simple docstring""" super().__init__( unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , do_clean_text=_lowerCAmelCase , **_lowerCAmelCase , ) if not os.path.isfile(_lowerCAmelCase ): raise ValueError( F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained' """ model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) if not os.path.isfile(_lowerCAmelCase ): raise ValueError( F'Can\'t find a emoji file at path \'{emoji_file}\'. 
To load the emoji information from a Google' """ pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) __lowercase = do_clean_text __lowercase , __lowercase , __lowercase , __lowercase = load_vocab_and_emoji(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def _a ( self : List[str] ) -> Dict: """simple docstring""" return len(self.raw_vocab ) def _a ( self : List[str] ) -> int: """simple docstring""" return dict(self.raw_vocab , **self.added_tokens_encoder ) def _a ( self : Any , _lowerCAmelCase : List[Any] ) -> Any: """simple docstring""" return self.subword_tokenizer.tokenize(_lowerCAmelCase , clean=self.do_clean_text ) def _a ( self : Tuple , _lowerCAmelCase : int ) -> List[Any]: """simple docstring""" return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) ) def _a ( self : str , _lowerCAmelCase : Union[str, Any] ) -> str: """simple docstring""" return self.subword_tokenizer.convert_id_to_token(_lowerCAmelCase ) def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" __lowercase = """""".join(_lowerCAmelCase ).strip() return out_string def _a ( self : Tuple , _lowerCAmelCase : "Conversation" ) -> List[int]: """simple docstring""" __lowercase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) + [self.eos_token_id] ) if len(_lowerCAmelCase ) > self.model_max_length: __lowercase = input_ids[-self.model_max_length :] return input_ids def _a ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" __lowercase = 0 if os.path.isdir(_lowerCAmelCase ): __lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] ) else: __lowercase = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""] ) __lowercase = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""] ) with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' 
""" Please check that the vocabulary is not corrupted!""" ) __lowercase = token_index writer.write(""",""".join(_lowerCAmelCase ) + """\n""" ) index += 1 with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer: json.dump(self.emoji , _lowerCAmelCase ) return vocab_file, emoji_file class __UpperCamelCase ( _lowerCAmelCase ): def __init__( self : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> List[Any]: """simple docstring""" __lowercase = vocab # same as swe __lowercase = ids_to_tokens # same as bpe __lowercase = emoji __lowercase = np.max([len(_lowerCAmelCase ) for w in self.vocab.keys()] ) __lowercase = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" ) __lowercase = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" ) __lowercase = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" ) __lowercase = re.compile( r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) __lowercase = re.compile( r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) __lowercase = re.compile( r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" ) __lowercase = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿""" __lowercase = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟""" __lowercase = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} ) def __len__( self : Any ) -> Optional[Any]: """simple docstring""" return len(self.ids_to_tokens ) def _a ( self : Union[str, Any] , _lowerCAmelCase : List[str] ) -> int: """simple docstring""" __lowercase = self.content_repattera.sub("""<URL>""" , _lowerCAmelCase ) __lowercase = self.content_repattera.sub("""<EMAIL>""" , _lowerCAmelCase ) __lowercase = self.content_repattera.sub("""<TEL>""" , _lowerCAmelCase ) __lowercase = self.content_repattera.sub("""<DATE>""" , _lowerCAmelCase ) __lowercase = self.content_repattera.sub("""<DATE>""" , _lowerCAmelCase ) __lowercase = self.content_repattera.sub("""<PRICE>""" , _lowerCAmelCase ) __lowercase = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: __lowercase = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" ) return content def _a ( self : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any]=False ) -> Optional[Any]: """simple docstring""" __lowercase = text.replace(""" """ , """<SP>""" ) __lowercase = text.replace(""" """ , """<SP>""" ) __lowercase = text.replace("""\r\n""" , """<BR>""" ) __lowercase = text.replace("""\n""" , """<BR>""" ) __lowercase = text.replace("""\r""" , """<BR>""" ) __lowercase = text.replace("""\t""" , """<TAB>""" ) __lowercase = text.replace("""—""" , """ー""" ) __lowercase = text.replace("""−""" , """ー""" ) for k, v in self.emoji["emoji"].items(): if k in text: __lowercase = text.replace(_lowerCAmelCase , _lowerCAmelCase ) if clean: __lowercase = self.clean_text(_lowerCAmelCase ) def check_simbol(_lowerCAmelCase : Union[str, Any] ): __lowercase = x.encode() if len(_lowerCAmelCase ) == 1 and len(_lowerCAmelCase ) == 2: __lowercase = (int(e[0] ) 
<< 8) + int(e[1] ) if ( (c >= 0Xc2a1 and c <= 0Xc2bf) or (c >= 0Xc780 and c <= 0Xc783) or (c >= 0Xcab9 and c <= 0Xcbbf) or (c >= 0Xcc80 and c <= 0Xcda2) ): return True return False def checkuae(_lowerCAmelCase : Tuple ): __lowercase = x.encode() if len(_lowerCAmelCase ) == 1 and len(_lowerCAmelCase ) == 3: __lowercase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0Xe2_8080 and c <= 0Xe2_b07f: return True return False __lowercase = 0 __lowercase = [] while pos < len(_lowerCAmelCase ): __lowercase = min(len(_lowerCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3 __lowercase = [] # (token_id, token, pos) for e in range(_lowerCAmelCase , _lowerCAmelCase , -1 ): __lowercase = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(_lowerCAmelCase ) > 2: __lowercase = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(_lowerCAmelCase ) > 0: # the smallest token_id is adopted __lowercase , __lowercase , __lowercase = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[0] )[0] result.append(_lowerCAmelCase ) __lowercase = e else: __lowercase = pos + 1 __lowercase = text[pos:end] if check_simbol(_lowerCAmelCase ): result.append("""<KIGOU>""" ) elif checkuae(_lowerCAmelCase ): result.append("""<U2000U2BFF>""" ) else: for i in wd.encode("""utf-8""" ): result.append("""<|byte%d|>""" % i ) __lowercase = end return result def _a ( self : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int]="\n" ) -> Optional[int]: """simple docstring""" __lowercase = [] __lowercase = [] __lowercase = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(_lowerCAmelCase ) > 0: words.append(bytearray(_lowerCAmelCase ).decode("""utf-8""" , errors="""replace""" ) ) __lowercase = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["""emoji_inv"""][word] ) elif word == "<SP>": words.append(""" """ ) elif word == "<BR>": words.append(_lowerCAmelCase ) elif word == "<TAB>": words.append("""\t""" ) elif word == "<BLOCK>": words.append("""▀""" ) elif word == "<KIGOU>": words.append("""ǀ""" ) elif word == "<U2000U2BFF>": words.append("""‖""" ) else: words.append(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: words.append(bytearray(_lowerCAmelCase ).decode("""utf-8""" , errors="""replace""" ) ) __lowercase = """""".join(_lowerCAmelCase ) return text
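# Illustration of the byte fallback implemented in the tokenizer above: any
# character that is neither in the vocab nor matched by check_simbol/checkuae
# is emitted as one "<|byte%d|>" token per UTF-8 byte. For example, if "あ"
# (UTF-8 bytes 0xE3 0x81 0x82) were out of vocabulary, it would tokenize to
# ["<|byte227|>", "<|byte129|>", "<|byte130|>"], and the decoding helper above
# reassembles such runs with bytearray(...).decode("utf-8", errors="replace").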
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def snake_case ( lowerCamelCase ): '''simple docstring''' if isinstance(lowerCamelCase , collections.abc.Iterable ): return x return (x, x) @require_tf class __UpperCamelCase : def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : Dict ) -> Optional[int]: """simple docstring""" pass def _a ( self : Any ) -> Optional[Any]: """simple docstring""" pass def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str: """simple docstring""" __lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) 
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) __lowercase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) __lowercase = after_output[0].numpy() __lowercase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase , 1e-5 ) def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model( input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase ) __lowercase = output.vision_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase = to_atuple(vision_model.config.image_size ) __lowercase = to_atuple(vision_model.config.patch_size ) __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase = output.text_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]: """simple docstring""" __lowercase = np.abs((a - b) ).max() self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' 
) def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_lowerCAmelCase ) def _a ( self : int ) -> List[Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> int: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase ) def _a ( self : List[str] ) -> str: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_save_load(**_lowerCAmelCase ) def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_lowerCAmelCase ) @slow def _a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase , __lowercase = self.get_pretrained_model_and_inputs() __lowercase = model_a(**_lowerCAmelCase ) __lowercase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ) __lowercase = model_a(**_lowerCAmelCase ) __lowercase = after_outputs[0].numpy() __lowercase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase , 1e-5 ) @require_tf class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): def _a ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) __lowercase = 13 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" __lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" ) __lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def _a ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase = TFViTModelTester(self ) __lowercase = TFBertModelTester(self ) __lowercase = vit_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): def _a ( self : Tuple ) -> Any: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" ) __lowercase = 13 
__lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model( input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase ) __lowercase = output.vision_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __lowercase = to_atuple(vision_model.config.image_size ) __lowercase = to_atuple(vision_model.config.patch_size ) __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase = output.text_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int: """simple docstring""" __lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" ) __lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def _a ( self : Tuple ) -> str: """simple docstring""" __lowercase = TFDeiTModelTester(self ) __lowercase = TFRobertaModelTester(self ) __lowercase = vit_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): def _a ( self : int ) -> Union[str, Any]: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" ) __lowercase = 13 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {"""pixel_values""": pixel_values, 
"""input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" ) __lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase = TFCLIPVisionModelTester(self ) __lowercase = TFBertModelTester(self ) __lowercase = clip_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class __UpperCamelCase ( unittest.TestCase ): @slow def _a ( self : int ) -> Tuple: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_pretrained( """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase ) __lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __lowercase = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" ) __lowercase = model(**_lowerCAmelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowercase = np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): __snake_case :List[Any] = DiTPipeline __snake_case :Any = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __snake_case :Tuple = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } __snake_case :str = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __snake_case :List[Any] = False def _a ( self : str ) -> Any: """simple docstring""" torch.manual_seed(0 ) __lowercase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=_lowerCAmelCase , ) __lowercase = AutoencoderKL() __lowercase = DDIMScheduler() __lowercase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def _a ( self : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=0 ) -> Optional[Any]: """simple docstring""" if str(_lowerCAmelCase ).startswith("""mps""" ): __lowercase = torch.manual_seed(_lowerCAmelCase ) else: __lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) __lowercase = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _a ( self : str ) -> Any: """simple docstring""" __lowercase = """cpu""" __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs(_lowerCAmelCase ) __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) __lowercase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) __lowercase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowerCAmelCase , 1e-3 ) def _a ( self : List[str] ) -> str: """simple docstring""" self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _a ( self : int ) -> Dict: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): def _a ( self : int ) -> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = torch.manual_seed(0 ) __lowercase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) __lowercase = ["""vase""", """umbrella""", """white 
shark""", """white wolf"""] __lowercase = pipe.get_label_ids(_lowerCAmelCase ) __lowercase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): __lowercase = load_numpy( F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1e-2 def _a ( self : List[str] ) -> Optional[Any]: """simple docstring""" __lowercase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) __lowercase = ["""vase""", """umbrella"""] __lowercase = pipe.get_label_ids(_lowerCAmelCase ) __lowercase = torch.manual_seed(0 ) __lowercase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ): __lowercase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" F'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1e-1
from __future__ import annotations

from scipy.special import comb  # type: ignore


class __UpperCamelCase :
    def __init__( self : int , _lowerCAmelCase : list[tuple[float, float]] ) -> Any:
        """simple docstring"""
        __lowercase = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        __lowercase = len(_lowerCAmelCase ) - 1

    def _a ( self : Tuple , _lowerCAmelCase : float ) -> list[float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        __lowercase = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , _lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(_lowerCAmelCase ) , 5 ) == 1
        return output_values

    def _a ( self : List[str] , _lowerCAmelCase : float ) -> tuple[float, float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        __lowercase = self.basis_function(_lowerCAmelCase )
        __lowercase = 0.0
        __lowercase = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def _a ( self : Optional[int] , _lowerCAmelCase : float = 0.01 ) -> Union[str, Any]:
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore

        __lowercase = []  # x coordinates of points to plot
        __lowercase = []  # y coordinates of points to plot

        __lowercase = 0.0
        while t <= 1:
            __lowercase = self.bezier_curve_function(_lowerCAmelCase )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size

        __lowercase = [i[0] for i in self.list_of_points]
        __lowercase = [i[1] for i in self.list_of_points]
        plt.plot(
            _lowerCAmelCase , _lowerCAmelCase , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(_lowerCAmelCase , _lowerCAmelCase , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
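# Worked example for bezier_curve_function above: for the degree-2 curve with
# control points (0, 0), (5, 5), (5, 0), the point at t = 0.5 is
# 0.25*(0, 0) + 0.5*(5, 5) + 0.25*(5, 0) = (3.75, 2.5).
#
#   >>> BezierCurve([(0, 0), (5, 5), (5, 0)]).bezier_curve_function(0.5)
#   (3.75, 2.5)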
from typing import Tuple, Union

from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig


if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
    __snake_case :int = 'pixel_values'
    __snake_case :Union[str, Any] = False
    __snake_case :int = TimmBackboneConfig

    def __init__( self : Tuple , _lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Dict ) -> Tuple:
        """simple docstring"""
        requires_backends(self , """timm""" )
        super().__init__(_lowerCAmelCase )
        __lowercase = config
        if config.backbone is None:
            raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )

        if config.backbone not in timm.list_models():
            raise ValueError(F'backbone {config.backbone} is not supported by timm.' )

        if hasattr(_lowerCAmelCase , """out_features""" ) and config.out_features is not None:
            raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )

        __lowercase = getattr(_lowerCAmelCase , """use_pretrained_backbone""" , _lowerCAmelCase )
        if pretrained is None:
            raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )

        # We just take the final layer by default. This matches the default for the transformers models.
        __lowercase = config.out_indices if getattr(_lowerCAmelCase , """out_indices""" , _lowerCAmelCase ) is not None else (-1,)

        __lowercase = timm.create_model(
            config.backbone ,
            pretrained=_lowerCAmelCase ,
            features_only=config.features_only ,
            in_chans=config.num_channels ,
            out_indices=_lowerCAmelCase ,
            **_lowerCAmelCase ,
        )

        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        __lowercase = self._backbone.return_layers
        __lowercase = {layer["""module"""]: str(_lowerCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(_lowerCAmelCase )

    @classmethod
    def _a ( cls : List[str] , _lowerCAmelCase : Optional[int] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ) -> int:
        """simple docstring"""
        requires_backends(cls , ["""vision""", """timm"""] )
        from ...models.timm_backbone import TimmBackboneConfig

        __lowercase = kwargs.pop("""config""" , TimmBackboneConfig() )
        __lowercase = kwargs.pop("""use_timm_backbone""" , _lowerCAmelCase )
        if not use_timm:
            raise ValueError("""use_timm_backbone must be True for timm backbones""" )

        __lowercase = kwargs.pop("""num_channels""" , config.num_channels )
        __lowercase = kwargs.pop("""features_only""" , config.features_only )
        __lowercase = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
        __lowercase = kwargs.pop("""out_indices""" , config.out_indices )
        __lowercase = TimmBackboneConfig(
            backbone=_lowerCAmelCase ,
            num_channels=_lowerCAmelCase ,
            features_only=_lowerCAmelCase ,
            use_pretrained_backbone=_lowerCAmelCase ,
            out_indices=_lowerCAmelCase ,
        )
        return super()._from_config(_lowerCAmelCase , **_lowerCAmelCase )

    def _a ( self : str , _lowerCAmelCase : Any ) -> Dict:
        """simple docstring"""
        pass

    def _a ( self : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Union[str, Any] ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        """simple docstring"""
        __lowercase = return_dict if return_dict is not None else self.config.use_return_dict
        __lowercase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowercase = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("""Cannot output attentions for timm backbones at the moment""" )

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            __lowercase = self._all_layers
            __lowercase = self._backbone(_lowerCAmelCase , **_lowerCAmelCase )
            __lowercase = self._return_layers
            __lowercase = tuple(hidden_states[i] for i in self.out_indices )
        else:
            __lowercase = self._backbone(_lowerCAmelCase , **_lowerCAmelCase )
            __lowercase = None
            __lowercase = tuple(_lowerCAmelCase )

        __lowercase = tuple(_lowerCAmelCase ) if hidden_states is not None else None

        if not return_dict:
            __lowercase = (feature_maps,)
            if output_hidden_states:
                __lowercase = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=_lowerCAmelCase , hidden_states=_lowerCAmelCase , attentions=_lowerCAmelCase )
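# A minimal usage sketch for the backbone wrapper above (the backbone name and
# input shape are illustrative; requires `timm`, plus a weight download when
# use_pretrained_backbone=True):
#
#   import torch
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)  # the class defined above
#   outputs = backbone(torch.randn(1, 3, 224, 224))
#   feature_maps = outputs.feature_maps  # tuple with one tensor per out_index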
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class __UpperCamelCase : def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = is_training __lowercase = use_labels __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = encoder_stride __lowercase = num_attention_outputs __lowercase = embed_dim __lowercase = embed_dim + 1 __lowercase = resolution __lowercase = depths __lowercase = hidden_sizes __lowercase = dim __lowercase = mlp_expansion_ratio def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = self.get_config() return config, pixel_values, labels def _a ( self : Optional[Any] ) -> str: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def _a ( self 
: Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = TFEfficientFormerModel(config=_lowerCAmelCase ) __lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" __lowercase = self.type_sequence_label_size __lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase ) __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase = 1 __lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase ) __lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): __snake_case :Any = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) __snake_case :Any = ( { 'feature-extraction': TFEfficientFormerModel, 'image-classification': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) __snake_case :int = False __snake_case :Optional[int] = False __snake_case :int = False __snake_case :Any = False __snake_case :Any = False def _a ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase = TFEfficientFormerModelTester(self ) __lowercase = ConfigTester( self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 ) def _a ( self : Optional[int] ) -> int: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" ) def _a ( self : Optional[int] ) -> Any: """simple docstring""" pass @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" ) def _a ( self : int ) -> str: """simple docstring""" pass def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(_lowerCAmelCase ) __lowercase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> Dict: """simple docstring""" def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ): __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = 
outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowercase = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) if hasattr(self.model_tester , """encoder_seq_length""" ): __lowercase = self.model_tester.encoder_seq_length if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1: __lowercase = seq_length * self.model_tester.chunk_length else: __lowercase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __lowercase = outputs.decoder_hidden_states self.assertIsInstance(_lowerCAmelCase , (list, tuple) ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict: """simple docstring""" __lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _a ( self : int ) -> int: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" ) def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase ) def _a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def _a ( self : List[str] ) -> List[Any]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def _a ( self : Any ) -> List[str]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase ) if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ): __lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes: __lowercase = True __lowercase = False __lowercase = True __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowercase = True __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def _a ( self : Dict ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __lowercase = model_class(_lowerCAmelCase ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __lowercase = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase ) for key, val in model.input_signature.items() if key in model.dummy_inputs } __lowercase = model(_lowerCAmelCase ) self.assertTrue(outputs_dict is not None ) def snake_case ( ): '''simple docstring''' __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def _a ( self : Optional[Any] ) -> Any: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" ) if is_vision_available() else None ) @slow def _a ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" ) # forward pass __lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase ) # verify the logits __lowercase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) __lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) ) @slow def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" __lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( """snap-research/efficientformer-l1-300""" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" ) # forward pass __lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase ) # verify the logits __lowercase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) __lowercase = 
tf.constant([-0.1_312, 0.4_353, -1.0_499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
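A minimal inference sketch distilled from the slow integration tests above; it is not part of the test suite, and it assumes the same snap-research/efficientformer-l1-300 checkpoint and COCO fixture image the tests use.

# Hedged usage sketch (assumes TensorFlow plus the vision extras installed).
import tensorflow as tf
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits  # shape (1, 1000), as asserted above
print(int(tf.argmax(logits, axis=-1).numpy()[0]))  # predicted ImageNet class id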
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) __UpperCamelCase : Dict = { """configuration_speecht5""": [ """SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""", """SpeechT5Config""", """SpeechT5HifiGanConfig""", ], """feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""], """processing_speecht5""": ["""SpeechT5Processor"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[str] = ["""SpeechT5Tokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[Any] = [ """SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """SpeechT5ForSpeechToText""", """SpeechT5ForSpeechToSpeech""", """SpeechT5ForTextToSpeech""", """SpeechT5Model""", """SpeechT5PreTrainedModel""", """SpeechT5HifiGan""", ] if TYPE_CHECKING: from .configuration_speecht5 import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechT5Config, SpeechT5HifiGanConfig, ) from .feature_extraction_speecht5 import SpeechT5FeatureExtractor from .processing_speecht5 import SpeechT5Processor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speecht5 import SpeechT5Tokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speecht5 import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechT5ForSpeechToSpeech, SpeechT5ForSpeechToText, SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Model, SpeechT5PreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
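The _LazyModule indirection above defers the heavy torch/sentencepiece imports until an exported name is actually accessed. Below is a minimal, self-contained sketch of that pattern; the LazyModule class and its internals are illustrative assumptions, a simplified stand-in rather than transformers' real _LazyModule.

# Simplified sketch of the lazy-import pattern: attribute access triggers
# the submodule import on demand (illustrative only, not the real class).
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)  # imported only now, on first access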
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __UpperCamelCase : Tuple = 2 class __UpperCamelCase : def __init__( self : List[str] , *, # begin keyword-only arguments _lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]=None , ) -> Tuple: """simple docstring""" __lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos __lowercase = [] __lowercase = [] __lowercase = {} __lowercase = self.add_symbol(_lowerCAmelCase ) __lowercase = self.add_symbol(_lowerCAmelCase ) __lowercase = self.add_symbol(_lowerCAmelCase ) __lowercase = self.add_symbol(_lowerCAmelCase ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(_lowerCAmelCase ) __lowercase = len(self.symbols ) def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any: """simple docstring""" return self.indices == other.indices def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict: """simple docstring""" if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : str ) -> List[str]: """simple docstring""" return len(self.symbols ) def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]: """simple docstring""" return sym in self.indices @classmethod def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase = cls() d.add_from_file(_lowerCAmelCase ) return d def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]: """simple docstring""" if word in self.indices and not overwrite: __lowercase = self.indices[word] __lowercase = self.count[idx] + n return idx else: __lowercase = len(self.symbols ) __lowercase = idx self.symbols.append(_lowerCAmelCase ) self.count.append(_lowerCAmelCase ) return idx def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]: """simple docstring""" return 0 def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str: """simple docstring""" if isinstance(_lowerCAmelCase , _lowerCAmelCase ): try: with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(_lowerCAmelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) ) return __lowercase = f.readlines() __lowercase = self._load_meta(_lowerCAmelCase ) for line in lines[indices_start_line:]: try: __lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 ) if field == "#fairseq:overwrite": __lowercase = True __lowercase , __lowercase = line.rsplit(""" """ , 1 ) else: __lowercase = False __lowercase = int(_lowerCAmelCase ) __lowercase = line if word in self and not overwrite: raise RuntimeError( """Duplicate word found when loading Dictionary: '{}'. """ """Duplicate words can overwrite earlier ones by adding the """ """#fairseq:overwrite flag at the end of the corresponding row """ """in the dictionary file. 
If using the Camembert model, please """ """download an updated copy of the model file.""".format(_lowerCAmelCase ) ) self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase ) except ValueError: raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" ) def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() ) __lowercase = """<s> <pad> </s> <unk>""".split() # restore the special tokens for k in keep_keys: del da[F'{k}</w>'] __lowercase = d[k] # restore return da def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' if not os.path.exists(lowerCamelCase ): raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' ) os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase ) print(F'Writing results to {pytorch_dump_folder_path}' ) # handle various types of models __lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {checkpoint_file} does not exist!' ) __lowercase = torch.load(lowerCamelCase , map_location="""cpu""" ) __lowercase = chkpt["""cfg"""]["""model"""] # dicts __lowercase = os.path.join(lowerCamelCase , """dict.txt""" ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {dict_file} does not exist!' ) __lowercase = Dictionary.load(lowerCamelCase ) __lowercase = rewrite_dict_keys(src_dict.indices ) __lowercase = len(lowerCamelCase ) __lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] ) print(F'Generating {src_vocab_file} of {src_vocab_size} records' ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) ) # merges_file (bpecodes) __lowercase = os.path.join(lowerCamelCase , """bpecodes""" ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {bpecodes_file} does not exist!' 
) __lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] ) shutil.copyfile(lowerCamelCase , lowerCamelCase ) # model config __lowercase = os.path.join(lowerCamelCase , """config.json""" ) __lowercase = { """activation_dropout""": args["""activation_dropout"""], """architectures""": ["""BioGptForCausalLM"""], """attention_probs_dropout_prob""": args["""attention_dropout"""], """bos_token_id""": 0, """eos_token_id""": 2, """hidden_act""": args["""activation_fn"""], """hidden_dropout_prob""": args["""dropout"""], """hidden_size""": args["""decoder_embed_dim"""], """initializer_range""": 0.02, """intermediate_size""": args["""decoder_ffn_embed_dim"""], """layer_norm_eps""": 1e-12, """layerdrop""": args["""decoder_layerdrop"""], """max_position_embeddings""": args["""max_target_positions"""], """model_type""": """biogpt""", """num_attention_heads""": args["""decoder_attention_heads"""], """num_hidden_layers""": args["""decoder_layers"""], """pad_token_id""": 1, """scale_embedding""": not args["""no_scale_embedding"""], """tie_word_embeddings""": args["""share_decoder_input_output_embed"""], """vocab_size""": src_vocab_size, } # good hparam defaults to start with print(F'Generating {biogpt_model_config_file}' ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) ) # tokenizer config __lowercase = os.path.join(lowerCamelCase , lowerCamelCase ) __lowercase = { """bos_token""": """<s>""", """eos_token""": """</s>""", """model_max_length""": 1_024, """pad_token""": """<pad>""", """special_tokens_map_file""": None, """tokenizer_class""": """BioGptTokenizer""", """unk_token""": """<unk>""", } print(F'Generating {biogpt_tokenizer_config_file}' ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) ) # model __lowercase = chkpt["""model"""] # remove unneeded keys __lowercase = [ """decoder.version""", ] for k in ignore_keys: model_state_dict.pop(lowerCamelCase , lowerCamelCase ) __lowercase = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("""output_projection.weight""" ): __lowercase = model_state_dict.pop(lowerCamelCase ) else: __lowercase = model_state_dict.pop(lowerCamelCase ) __lowercase = BioGptConfig.from_pretrained(lowerCamelCase ) __lowercase = BioGptForCausalLM(lowerCamelCase ) # check that it loads ok model_new.load_state_dict(lowerCamelCase ) # save __lowercase = os.path.join(lowerCamelCase , lowerCamelCase ) print(F'Generating {pytorch_weights_dump_path}' ) torch.save(lowerCamelCase , lowerCamelCase ) print("""Conversion is done!""" ) if __name__ == "__main__": __UpperCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--biogpt_checkpoint_path""", default=None, type=str, required=True, help=( """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,""" """ bpecodes, etc.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __UpperCamelCase : Optional[Any] = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
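A hedged sanity-check sketch for the converted dump; "converted-biogpt" is a placeholder for whatever was passed as --pytorch_dump_folder_path, not a real repo id.

# Reload the converted checkpoint with the standard from_pretrained API.
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("converted-biogpt")
model = BioGptForCausalLM.from_pretrained("converted-biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")
generated = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(generated[0], skip_special_tokens=True))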
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __UpperCamelCase : Tuple = { """configuration_swiftformer""": [ """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwiftFormerConfig""", """SwiftFormerOnnxConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = [ """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwiftFormerForImageClassification""", """SwiftFormerModel""", """SwiftFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): __snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str: """simple docstring""" __lowercase = np.random.RandomState(_lowerCAmelCase ) __lowercase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self : int ) -> List[Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Tuple ) -> int: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Tuple ) 
-> Union[str, Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs["""prompt"""]] # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs.pop("""prompt""" )] __lowercase = pipe.tokenizer( _lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , ) __lowercase = text_inputs["""input_ids"""] __lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] __lowercase = prompt_embeds # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def _a ( self : int ) -> str: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = 3 * ["""this is a negative prompt"""] __lowercase = negative_prompt __lowercase = 3 * [inputs["""prompt"""]] # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs.pop("""prompt""" )] __lowercase = [] for p in [prompt, negative_prompt]: __lowercase = pipe.tokenizer( _lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , ) __lowercase = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) __lowercase , __lowercase = embeds # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): @property def _a ( self : Dict ) 
-> str: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __lowercase = ort.SessionOptions() __lowercase = False return options def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """A painting of a squirrel eating a burger""" np.random.seed(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Tuple ) -> Any: """simple docstring""" __lowercase = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """open neural network exchange""" __lowercase = np.random.RandomState(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Dict ) -> Dict: """simple docstring""" __lowercase = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """open neural network exchange""" __lowercase = np.random.RandomState(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : str ) -> List[str]: """simple docstring""" __lowercase = 0 def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None: __lowercase = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) __lowercase = latents[0, -3:, -3:, -1] 
__lowercase = np.array( [-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) __lowercase = latents[0, -3:, -3:, -1] __lowercase = np.array( [-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 __lowercase = False __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """Andromeda galaxy in a bottle""" __lowercase = np.random.RandomState(0 ) pipe( prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert pipe.safety_checker is None __lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_lowerCAmelCase ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase ) # sanity check that the pipeline still works assert pipe.safety_checker is None __lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None
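A plain-CPU usage sketch of the pipeline these tests exercise, reusing the tiny test checkpoint so it stays fast; all arguments mirror get_dummy_inputs above.

# Usage sketch (tiny test checkpoint, CPU execution provider, 2 steps).
import numpy as np
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
    provider="CPUExecutionProvider",
)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=np.random.RandomState(0),
    num_inference_steps=2,
    guidance_scale=7.5,
    output_type="np",
).images[0]
print(image.shape)  # (128, 128, 3) for this tiny checkpoint, per the asserts above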
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __UpperCamelCase ( _lowerCAmelCase ): @slow @require_torch def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) __lowercase = BertTokenizer.from_pretrained("""bert-base-uncased""" ) __lowercase = bertabert.config.encoder.vocab_size __lowercase = tokenizer.sep_token_id __lowercase = tokenizer.cls_token_id __lowercase = 128 __lowercase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) __lowercase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) __lowercase = train_dataset.select(range(32 ) ) __lowercase = val_dataset.select(range(16 ) ) __lowercase = 4 def _map_to_encoder_decoder_inputs(_lowerCAmelCase : Any ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowercase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_lowerCAmelCase , max_length=512 ) __lowercase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_lowerCAmelCase , max_length=128 ) __lowercase = inputs.input_ids __lowercase = inputs.attention_mask __lowercase = outputs.input_ids __lowercase = outputs.input_ids.copy() __lowercase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] __lowercase = outputs.attention_mask assert all(len(_lowerCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(_lowerCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(_lowerCAmelCase : Tuple ): __lowercase = pred.label_ids __lowercase = pred.predictions # all unnecessary tokens are removed __lowercase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) __lowercase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) __lowercase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_lowerCAmelCase ) )] ) / len(_lowerCAmelCase ) return {"accuracy": accuracy} # map train dataset __lowercase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=_lowerCAmelCase , batch_size=_lowerCAmelCase , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset __lowercase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=_lowerCAmelCase , batch_size=_lowerCAmelCase , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) __lowercase = self.get_auto_remove_tmp_dir() __lowercase = SeqaSeqTrainingArguments( output_dir=_lowerCAmelCase , per_device_train_batch_size=_lowerCAmelCase , per_device_eval_batch_size=_lowerCAmelCase , predict_with_generate=_lowerCAmelCase , evaluation_strategy="""steps""" , do_train=_lowerCAmelCase , do_eval=_lowerCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer __lowercase = SeqaSeqTrainer( model=_lowerCAmelCase , args=_lowerCAmelCase , 
compute_metrics=_compute_metrics , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , tokenizer=_lowerCAmelCase , ) # start training trainer.train()
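A hedged sketch of the same warm-started BERT2BERT setup the test uses, run for generation instead of training; the special-token wiring follows the test's config assignments, and the input text is an arbitrary placeholder.

# Warm-start a tiny encoder-decoder as above and generate a summary.
from transformers import BertTokenizer, EncoderDecoderModel

model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "prajjwal1/bert-tiny", "prajjwal1/bert-tiny"
)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.eos_token_id = tokenizer.sep_token_id
model.config.pad_token_id = tokenizer.pad_token_id

inputs = tokenizer("A long news article to summarize ...", return_tensors="pt")
summary_ids = model.generate(inputs.input_ids, max_length=32)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))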
def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = """""" for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key __lowercase = remove_duplicates(key.upper() ) __lowercase = len(lowerCamelCase ) # First fill cipher with key characters __lowercase = {alphabet[i]: char for i, char in enumerate(lowerCamelCase )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(lowerCamelCase ) , 26 ): __lowercase = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 __lowercase = alphabet[i - offset] __lowercase = char return cipher_alphabet def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' return "".join(cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() ) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() ) def snake_case ( ): '''simple docstring''' __lowercase = input("""Enter message to encode or decode: """ ).strip() __lowercase = input("""Enter keyword: """ ).strip() __lowercase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower() try: __lowercase = {"""e""": encipher, """d""": decipher}[option] except KeyError: raise KeyError("""invalid input option""" ) __lowercase = create_cipher_map(lowerCamelCase ) print(func(lowerCamelCase , lowerCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
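A short round-trip check of the functions above: the keyword cipher map is a bijection on A-Z, so deciphering an enciphered message must recover it. The keyword "marvin" is an arbitrary example.

# Round-trip sketch: encipher then decipher recovers the original message.
cipher_map = create_cipher_map("marvin")
secret = encipher("HELLO WORLD", cipher_map)
assert decipher(secret, cipher_map) == "HELLO WORLD"
print(secret)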
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCamelCase : List[Any] = logging.getLogger(__name__) __UpperCamelCase : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) __UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={ 'help': ( 'The model checkpoint for weights initialization. Leave None if you want to train a model from' ' scratch.' ) } , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __UpperCamelCase : __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={ 'help': ( 'The input training data files (multiple files in glob format). ' 'Very often splitting large files to smaller files can prevent tokenizer going out of memory' ) } , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} ) __snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether ot not to use whole word mask.'} ) __snake_case :float = field( default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) __snake_case :float = field( default=1 / 6 , metadata={ 'help': ( 'Ratio of length of a span of masked tokens to surrounding context length for permutation language' ' modeling.' 
) } , ) __snake_case :int = field( default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} ) __snake_case :int = field( default=-1 , metadata={ 'help': ( 'Optional input sequence length after tokenization.' 'The training dataset will be truncated in block of this size for training.' 'Default to the model max input length for single sentence inputs (take into account special tokens).' ) } , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ): '''simple docstring''' def _dataset(lowerCamelCase , lowerCamelCase=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" ) return LineByLineWithRefDataset( tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , ) return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size ) else: return TextDataset( tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def snake_case ( ): '''simple docstring''' __lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( """Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """ """or remove the --do_eval argument.""" ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if model_args.config_name: __lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: __lowercase = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.tokenizer_name: __lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another""" """ script, save it,and load it from here, using --tokenizer_name""" ) if model_args.model_name_or_path: __lowercase = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , ) else: logger.info("""Training new model from scratch""" ) __lowercase = AutoModelWithLMHead.from_config(lowerCamelCase ) model.resize_token_embeddings(len(lowerCamelCase ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( """BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the""" """--mlm flag (masked language modeling).""" ) if data_args.block_size <= 0: __lowercase = tokenizer.max_len # Our input block size will be the max possible for the model else: __lowercase = min(data_args.block_size , tokenizer.max_len ) # Get datasets __lowercase = ( get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) __lowercase = ( get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": __lowercase = DataCollatorForPermutationLanguageModeling( tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: __lowercase = DataCollatorForWholeWordMask( tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability ) else: __lowercase = DataCollatorForLanguageModeling( tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer __lowercase = Trainer( model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , ) # Training if training_args.do_train: __lowercase = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=lowerCamelCase ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowercase = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __lowercase = trainer.evaluate() __lowercase = math.exp(eval_output["""eval_loss"""] ) __lowercase = {"""perplexity""": perplexity} __lowercase = 
os.path.join(training_args.output_dir , """eval_results_lm.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) results.update(lowerCamelCase ) return results def snake_case ( lowerCamelCase ): '''simple docstring''' main() if __name__ == "__main__": main()
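A hedged sketch of what the MLM collator selected above produces: it masks a random ~15% of tokens and mirrors them into labels, with -100 elsewhere so the loss ignores unmasked positions. The example sentence is arbitrary and the masking is random, so outputs vary run to run.

# Sketch of the masked-LM collation step configured above.
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)

encoding = tokenizer("language modeling trains on masked tokens")
batch = collator([encoding])
print(batch["input_ids"])  # some ids replaced by tokenizer.mask_token_id
print(batch["labels"])     # -100 except at the masked positions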
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): __snake_case :Any = IFInpaintingPipeline __snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'} __snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'} def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return self._get_dummy_components() def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any: """simple docstring""" if str(_lowerCAmelCase ).startswith("""mps""" ): __lowercase = torch.manual_seed(_lowerCAmelCase ) else: __lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) __lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) __lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) __lowercase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _a ( self : Tuple ) -> Dict: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def _a ( self : Union[str, Any] ) -> Dict: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1e-1 ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def _a ( self : str ) -> Optional[int]: """simple docstring""" self._test_save_load_local() def _a ( self : List[str] ) -> List[Any]: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class __UpperCamelCase : def _a ( self : List[Any] , _lowerCAmelCase : Any ) -> str: """simple docstring""" raise NotImplementedError() def _a ( self : Dict ) -> int: """simple docstring""" raise NotImplementedError() class __UpperCamelCase ( _lowerCAmelCase ): def __init__( self : Dict , _lowerCAmelCase : "AutoTokenizer" , _lowerCAmelCase : bool = False , **_lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" __lowercase = tokenizer __lowercase = skip_prompt __lowercase = decode_kwargs # variables used in the streaming process __lowercase = [] __lowercase = 0 __lowercase = True def _a ( self : Optional[int] , _lowerCAmelCase : List[Any] ) -> Optional[Any]: """simple docstring""" if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError("""TextStreamer only supports batch size 1""" ) elif len(value.shape ) > 1: __lowercase = value[0] if self.skip_prompt and self.next_tokens_are_prompt: __lowercase = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) __lowercase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith("""\n""" ): __lowercase = text[self.print_len :] __lowercase = [] __lowercase = 0 # If the last token is a CJK character, we print the characters. elif len(_lowerCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ): __lowercase = text[self.print_len :] self.print_len += len(_lowerCAmelCase ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) 
else: __lowercase = text[self.print_len : text.rfind(""" """ ) + 1] self.print_len += len(_lowerCAmelCase ) self.on_finalized_text(_lowerCAmelCase ) def _a ( self : int ) -> Optional[int]: """simple docstring""" if len(self.token_cache ) > 0: __lowercase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) __lowercase = text[self.print_len :] __lowercase = [] __lowercase = 0 else: __lowercase = """""" __lowercase = True self.on_finalized_text(_lowerCAmelCase , stream_end=_lowerCAmelCase ) def _a ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ) -> str: """simple docstring""" print(_lowerCAmelCase , flush=_lowerCAmelCase , end="""""" if not stream_end else None ) def _a ( self : Tuple , _lowerCAmelCase : Tuple ) -> int: """simple docstring""" if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False class __UpperCamelCase ( _lowerCAmelCase ): def __init__( self : Optional[int] , _lowerCAmelCase : "AutoTokenizer" , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[float] = None , **_lowerCAmelCase : List[Any] ) -> List[str]: """simple docstring""" super().__init__(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ) __lowercase = Queue() __lowercase = None __lowercase = timeout def _a ( self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ) -> int: """simple docstring""" self.text_queue.put(_lowerCAmelCase , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self : Any ) -> Any: """simple docstring""" return self def _a ( self : Any ) -> Optional[int]: """simple docstring""" __lowercase = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
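A usage sketch for the streamer classes above: generate(..., streamer=...) hands each new token batch to put, so text prints incrementally instead of after generate() returns. The gpt2 checkpoint and prompt are illustrative choices.

# Streaming-generation sketch: tokens print as they are produced.
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
streamer = TextStreamer(tokenizer, skip_prompt=True)

inputs = tokenizer("Streaming decoders are useful because", return_tensors="pt")
model.generate(**inputs, streamer=streamer, max_new_tokens=20)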
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :str = (UnCLIPScheduler,) def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple: """simple docstring""" __lowercase = { """num_train_timesteps""": 1000, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**_lowerCAmelCase ) return config def _a ( self : Dict ) -> List[Any]: """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_lowerCAmelCase ) def _a ( self : Any ) -> Any: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_lowerCAmelCase ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _a ( self : str ) -> int: """simple docstring""" for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" ) __lowercase = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5 def _a ( self : str ) -> Optional[int]: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config(variance_type="""learned_range""" ) __lowercase = scheduler_class(**_lowerCAmelCase ) __lowercase = 0.5 assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5 def _a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**_lowerCAmelCase ) __lowercase = scheduler.timesteps __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter __lowercase = torch.manual_seed(0 ) for i, t in enumerate(_lowerCAmelCase ): # 1. predict noise residual __lowercase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. 
predict previous mean of sample x_t-1 __lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample __lowercase = pred_prev_sample __lowercase = torch.sum(torch.abs(_lowerCAmelCase ) ) __lowercase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3 def _a ( self : List[str] ) -> str: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(25 ) __lowercase = scheduler.timesteps __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter __lowercase = torch.manual_seed(0 ) for i, t in enumerate(_lowerCAmelCase ): # 1. predict noise residual __lowercase = model(_lowerCAmelCase , _lowerCAmelCase ) if i + 1 == timesteps.shape[0]: __lowercase = None else: __lowercase = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 __lowercase = scheduler.step( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample __lowercase = pred_prev_sample __lowercase = torch.sum(torch.abs(_lowerCAmelCase ) ) __lowercase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3 def _a ( self : str ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : int ) -> List[str]: """simple docstring""" pass
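# --- Usage sketch (added): a bare denoising loop with UnCLIPScheduler, mirroring
# the full-loop test above. The zero "model output" and the sample shape are
# illustrative assumptions, not tied to any particular model.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
sample = torch.randn(1, 3, 64, 64)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for a denoising model's prediction
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample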
import numpy # List of input, output pairs __UpperCamelCase : List[str] = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) __UpperCamelCase : List[Any] = (((515, 22, 13), 555), ((61, 35, 49), 150)) __UpperCamelCase : List[Any] = [2, 4, 1, 5] __UpperCamelCase : Union[str, Any] = len(train_data) __UpperCamelCase : List[Any] = 0.0_0_9 def snake_case ( lowerCamelCase , lowerCamelCase="train" ): '''simple docstring''' return calculate_hypothesis_value(lowerCamelCase , lowerCamelCase ) - output( lowerCamelCase , lowerCamelCase ) def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = 0 for i in range(len(lowerCamelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def snake_case ( lowerCamelCase , lowerCamelCase=m ): '''simple docstring''' __lowercase = 0 for i in range(lowerCamelCase ): if index == -1: summation_value += _error(lowerCamelCase ) else: summation_value += _error(lowerCamelCase ) * train_data[i][0][index] return summation_value def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = summation_of_cost_derivative(lowerCamelCase , lowerCamelCase ) / m return cost_derivative_value def snake_case ( ): '''simple docstring''' global parameter_vector # Tune these values to set a tolerance value for predicted output __lowercase = 0.000002 __lowercase = 0 __lowercase = 0 while True: j += 1 __lowercase = [0, 0, 0, 0] for i in range(0 , len(lowerCamelCase ) ): __lowercase = get_cost_derivative(i - 1 ) __lowercase = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( lowerCamelCase , lowerCamelCase , atol=lowerCamelCase , rtol=lowerCamelCase , ): break __lowercase = temp_parameter_vector print(("""Number of iterations:""", j) ) def snake_case ( ): '''simple docstring''' for i in range(len(lowerCamelCase ) ): print(("""Actual output value:""", output(lowerCamelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(lowerCamelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
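# --- Equivalent vectorized sketch (added): the same batch gradient descent as
# above, expressed with numpy array operations. `batch_gradient_descent` is a
# hypothetical helper, not part of the original script.
import numpy as np


def batch_gradient_descent(x, y, lr=0.009, atol=0.000002, max_iter=100_000):
    x = np.hstack([np.ones((x.shape[0], 1)), x])  # prepend a bias column
    theta = np.zeros(x.shape[1])
    for _ in range(max_iter):
        grad = (x @ theta - y) @ x / len(y)  # average gradient of the squared-error cost
        new_theta = theta - lr * grad
        if np.allclose(theta, new_theta, atol=atol):
            break
        theta = new_theta
    return theta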
import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures __UpperCamelCase : Any = logging.get_logger(__name__) @dataclass class __UpperCamelCase : __snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} ) __snake_case :str = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) __snake_case :int = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def _a ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = self.task_name.lower() class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :Optional[int] = 'train' __snake_case :int = 'dev' __snake_case :Any = 'test' class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :GlueDataTrainingArguments __snake_case :str __snake_case :List[InputFeatures] def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]: """simple docstring""" warnings.warn( """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """ """library. You can have a look at this example script for pointers: """ """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , ) __lowercase = args __lowercase = glue_processors[args.task_name]() __lowercase = glue_output_modes[args.task_name] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): try: __lowercase = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) # Load data features from cache or dataset file __lowercase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) __lowercase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) __lowercase , __lowercase = label_list[2], label_list[1] __lowercase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
__lowercase = cached_features_file + """.lock""" with FileLock(_lowerCAmelCase ): if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache: __lowercase = time.time() __lowercase = torch.load(_lowerCAmelCase ) logger.info( F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(F'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: __lowercase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: __lowercase = self.processor.get_test_examples(args.data_dir ) else: __lowercase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: __lowercase = examples[:limit_length] __lowercase = glue_convert_examples_to_features( _lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , ) __lowercase = time.time() torch.save(self.features , _lowerCAmelCase ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self : Dict ) -> Optional[int]: """simple docstring""" return len(self.features ) def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures: """simple docstring""" return self.features[i] def _a ( self : str ) -> int: """simple docstring""" return self.label_list
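# --- Usage sketch (added): how the (deprecated) dataset above is typically
# constructed. Assumptions: the classes correspond to transformers' GlueDataset /
# GlueDataTrainingArguments, and ./glue_data/MRPC holds the task's .tsv files.
from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
args = GlueDataTrainingArguments(
    task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128
)
train_dataset = GlueDataset(args, tokenizer=tokenizer)
print(len(train_dataset), train_dataset.get_labels())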
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase : List[Any] = logging.get_logger(__name__) __UpperCamelCase : Optional[Any] = { """microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""", } class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ): __snake_case :Dict = 'resnet' __snake_case :Optional[int] = ['basic', 'bottleneck'] def __init__( self : List[Any] , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : List[str]=64 , _lowerCAmelCase : str=[256, 512, 1024, 2048] , _lowerCAmelCase : Union[str, Any]=[3, 4, 6, 3] , _lowerCAmelCase : Union[str, Any]="bottleneck" , _lowerCAmelCase : List[Any]="relu" , _lowerCAmelCase : int=False , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : int , ) -> Optional[Any]: """simple docstring""" super().__init__(**_lowerCAmelCase ) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) __lowercase = num_channels __lowercase = embedding_size __lowercase = hidden_sizes __lowercase = depths __lowercase = layer_type __lowercase = hidden_act __lowercase = downsample_in_first_stage __lowercase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(_lowerCAmelCase ) + 1 )] __lowercase , __lowercase = get_aligned_output_features_output_indices( out_features=_lowerCAmelCase , out_indices=_lowerCAmelCase , stage_names=self.stage_names ) class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :int = version.parse('1.11' ) @property def _a ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _a ( self : Any ) -> float: """simple docstring""" return 1e-3
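# --- Usage sketch (added): instantiating the config above and inspecting the
# aligned backbone outputs. Assumption: the class corresponds to transformers'
# ResNetConfig; all other values are the defaults defined above.
from transformers import ResNetConfig

config = ResNetConfig(layer_type="bottleneck", out_features=["stage2", "stage4"])
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features, config.out_indices)  # ['stage2', 'stage4'] aligned to [2, 4]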
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCamelCase : List[Any] = logging.getLogger(__name__) __UpperCamelCase : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) __UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={ 'help': ( 'The model checkpoint for weights initialization. Leave None if you want to train a model from' ' scratch.' ) } , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __UpperCamelCase : __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={ 'help': ( 'The input training data files (multiple files in glob format). ' 'Very often splitting large files to smaller files can prevent tokenizer going out of memory' ) } , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} ) __snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether ot not to use whole word mask.'} ) __snake_case :float = field( default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) __snake_case :float = field( default=1 / 6 , metadata={ 'help': ( 'Ratio of length of a span of masked tokens to surrounding context length for permutation language' ' modeling.' 
) } , ) __snake_case :int = field( default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} ) __snake_case :int = field( default=-1 , metadata={ 'help': ( 'Optional input sequence length after tokenization.' 'The training dataset will be truncated in block of this size for training.' 'Default to the model max input length for single sentence inputs (take into account special tokens).' ) } , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ): '''simple docstring''' def _dataset(lowerCamelCase , lowerCamelCase=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" ) return LineByLineWithRefDataset( tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , ) return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size ) else: return TextDataset( tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def snake_case ( ): '''simple docstring''' __lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( """Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """ """or remove the --do_eval argument.""" ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if model_args.config_name: __lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: __lowercase = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.tokenizer_name: __lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another""" """ script, save it,and load it from here, using --tokenizer_name""" ) if model_args.model_name_or_path: __lowercase = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , ) else: logger.info("""Training new model from scratch""" ) __lowercase = AutoModelWithLMHead.from_config(lowerCamelCase ) model.resize_token_embeddings(len(lowerCamelCase ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( """BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the""" """--mlm flag (masked language modeling).""" ) if data_args.block_size <= 0: __lowercase = tokenizer.max_len # Our input block size will be the max possible for the model else: __lowercase = min(data_args.block_size , tokenizer.max_len ) # Get datasets __lowercase = ( get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) __lowercase = ( get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": __lowercase = DataCollatorForPermutationLanguageModeling( tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: __lowercase = DataCollatorForWholeWordMask( tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability ) else: __lowercase = DataCollatorForLanguageModeling( tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer __lowercase = Trainer( model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , ) # Training if training_args.do_train: __lowercase = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=lowerCamelCase ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowercase = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __lowercase = trainer.evaluate() __lowercase = math.exp(eval_output["""eval_loss"""] ) __lowercase = {"""perplexity""": perplexity} __lowercase = 
os.path.join(training_args.output_dir , """eval_results_lm.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) results.update(lowerCamelCase ) return results def snake_case ( lowerCamelCase ): '''simple docstring''' main() if __name__ == "__main__": main()
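# --- Example invocation (added): flag names follow the dataclass fields and
# TrainingArguments used above; the script filename, model name, and paths are
# placeholder assumptions.
# python run_language_modeling.py \
#     --model_name_or_path gpt2 \
#     --train_data_file ./data/train.txt \
#     --eval_data_file ./data/eval.txt \
#     --do_train --do_eval \
#     --output_dir ./lm-output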
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): def _a ( self : Union[str, Any] ) -> Any: """simple docstring""" super().tearDown() gc.collect() def _a ( self : List[str] ) -> int: """simple docstring""" __lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) __lowercase = """A painting of a squirrel eating a burger""" __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase ) __lowercase = replicate(_lowerCAmelCase ) __lowercase = shard(_lowerCAmelCase ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() ) __lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __lowercase = images[0, 253:256, 253:256, -1] __lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _a ( self : str ) -> List[Any]: """simple docstring""" __lowercase = """stabilityai/stable-diffusion-2""" __lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) __lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained( _lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , ) __lowercase = scheduler_params __lowercase = """A painting of a squirrel eating a burger""" __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase ) __lowercase = replicate(_lowerCAmelCase ) __lowercase = shard(_lowerCAmelCase ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() ) __lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __lowercase = images[0, 253:256, 253:256, -1] __lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the side lengths in ``nums`` can form a polygon, i.e. the
    longest side is strictly shorter than the sum of all remaining sides."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with the given tax rate applied."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of ``nums`` with no two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from __future__ import annotations def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): __lowercase , __lowercase = array[indexa], array[indexa] def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if length > 1: __lowercase = int(length / 2 ) for i in range(lowerCamelCase , low + middle ): comp_and_swap(lowerCamelCase , lowerCamelCase , i + middle , lowerCamelCase ) bitonic_merge(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) bitonic_merge(lowerCamelCase , low + middle , lowerCamelCase , lowerCamelCase ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if length > 1: __lowercase = int(length / 2 ) bitonic_sort(lowerCamelCase , lowerCamelCase , lowerCamelCase , 1 ) bitonic_sort(lowerCamelCase , low + middle , lowerCamelCase , 0 ) bitonic_merge(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase : Any = input("""Enter numbers separated by a comma:\n""").strip() __UpperCamelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")] bitonic_sort(unsorted, 0, len(unsorted), 1) print("""\nSorted array in ascending order is: """, end="""""") print(*unsorted, sep=""", """) bitonic_merge(unsorted, 0, len(unsorted), 0) print("""Sorted array in descending order is: """, end="""""") print(*unsorted, sep=""", """)
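# --- Note (added): bitonic sort only works when the input length is a power of
# two; this small hypothetical guard makes the precondition explicit.
def _is_power_of_two(n: int) -> bool:
    return n > 0 and (n & (n - 1)) == 0


assert _is_power_of_two(len([3, 1, 4, 1]))  # e.g. lists of length 4 qualify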
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : str = logging.get_logger(__name__) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = RobertaPreLayerNormConfig.from_pretrained( lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] ) # convert state_dict __lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) ) __lowercase = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith("""roberta.""" ): __lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ): continue __lowercase = tensor_value __lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) # convert tokenizer __lowercase = AutoTokenizer.from_pretrained(lowerCamelCase ) tokenizer.save_pretrained(lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint-repo""", default=None, type=str, required=True, help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __UpperCamelCase : Dict = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
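# --- Example invocation (added): the repo id is the one cited in the argparse
# help text above; the script filename is an assumption.
# python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta-prelayernorm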
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = tmp_path / """cache""" __lowercase = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowercase = TextDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_text_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = tmp_path / """cache""" __lowercase = {"""text""": """string"""} __lowercase = features.copy() if features else default_expected_features __lowercase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowercase = TextDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_text_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = tmp_path / """cache""" __lowercase = {"""text""": """string"""} __lowercase = TextDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read() _check_text_dataset(lowerCamelCase , lowerCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if issubclass(lowerCamelCase , lowerCamelCase ): __lowercase = text_path elif issubclass(lowerCamelCase , lowerCamelCase ): __lowercase = [text_path] __lowercase = tmp_path / """cache""" __lowercase = {"""text""": """string"""} __lowercase = TextDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_text_dataset(lowerCamelCase , lowerCamelCase ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase=("train",) ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) for split in splits: __lowercase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = tmp_path / """cache""" __lowercase = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowercase = TextDatasetReader({"""train""": 
text_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_text_datasetdict(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __lowercase = {"""text""": """string"""} __lowercase = features.copy() if features else default_expected_features __lowercase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowercase = TextDatasetReader({"""train""": text_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_text_datasetdict(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if split: __lowercase = {split: text_path} else: __lowercase = """train""" __lowercase = {"""train""": text_path, """test""": text_path} __lowercase = tmp_path / """cache""" __lowercase = {"""text""": """string"""} __lowercase = TextDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_text_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
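# --- Usage sketch (added): the public API that the fixtures above exercise under
# the hood. Assumption: "my_file.txt" is any plain-text file, one sample per line.
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_file.txt"})["train"]
print(ds.column_names)  # ['text']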
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a ``ksize`` x ``ksize`` Gabor filter kernel."""
    # Gabor kernels are defined on an odd-sized grid so there is a true center.
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = checkpoint __lowercase = {} __lowercase = vae_state_dict["""encoder.conv_in.weight"""] __lowercase = vae_state_dict["""encoder.conv_in.bias"""] __lowercase = vae_state_dict["""encoder.conv_out.weight"""] __lowercase = vae_state_dict["""encoder.conv_out.bias"""] __lowercase = vae_state_dict["""encoder.norm_out.weight"""] __lowercase = vae_state_dict["""encoder.norm_out.bias"""] __lowercase = vae_state_dict["""decoder.conv_in.weight"""] __lowercase = vae_state_dict["""decoder.conv_in.bias"""] __lowercase = vae_state_dict["""decoder.conv_out.weight"""] __lowercase = vae_state_dict["""decoder.conv_out.bias"""] __lowercase = vae_state_dict["""decoder.norm_out.weight"""] __lowercase = vae_state_dict["""decoder.norm_out.bias"""] __lowercase = vae_state_dict["""quant_conv.weight"""] __lowercase = vae_state_dict["""quant_conv.bias"""] __lowercase = vae_state_dict["""post_quant_conv.weight"""] __lowercase = vae_state_dict["""post_quant_conv.bias"""] # Retrieves the keys for the encoder down blocks only __lowercase = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} ) __lowercase = { layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(lowerCamelCase ) } # Retrieves the keys for the decoder up blocks only __lowercase = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} ) __lowercase = { layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(lowerCamelCase ) } for i in range(lowerCamelCase ): __lowercase = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key] if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict: __lowercase = vae_state_dict.pop( F'encoder.down.{i}.downsample.conv.weight' ) __lowercase = vae_state_dict.pop( F'encoder.down.{i}.downsample.conv.bias' ) __lowercase = renew_vae_resnet_paths(lowerCamelCase ) __lowercase = {"""old""": F'down.{i}.block', """new""": F'down_blocks.{i}.resnets'} assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase ) __lowercase = [key for key in vae_state_dict if """encoder.mid.block""" in key] __lowercase = 2 for i in range(1 , num_mid_res_blocks + 1 ): __lowercase = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key] __lowercase = renew_vae_resnet_paths(lowerCamelCase ) __lowercase = {"""old""": F'mid.block_{i}', """new""": F'mid_block.resnets.{i - 1}'} assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase ) __lowercase = [key for key in vae_state_dict if """encoder.mid.attn""" in key] __lowercase = renew_vae_attention_paths(lowerCamelCase ) __lowercase = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase ) conv_attn_to_linear(lowerCamelCase ) for i in range(lowerCamelCase ): __lowercase = num_up_blocks - 1 - i 
__lowercase = [ key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key ] if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict: __lowercase = vae_state_dict[ F'decoder.up.{block_id}.upsample.conv.weight' ] __lowercase = vae_state_dict[ F'decoder.up.{block_id}.upsample.conv.bias' ] __lowercase = renew_vae_resnet_paths(lowerCamelCase ) __lowercase = {"""old""": F'up.{block_id}.block', """new""": F'up_blocks.{i}.resnets'} assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase ) __lowercase = [key for key in vae_state_dict if """decoder.mid.block""" in key] __lowercase = 2 for i in range(1 , num_mid_res_blocks + 1 ): __lowercase = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key] __lowercase = renew_vae_resnet_paths(lowerCamelCase ) __lowercase = {"""old""": F'mid.block_{i}', """new""": F'mid_block.resnets.{i - 1}'} assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase ) __lowercase = [key for key in vae_state_dict if """decoder.mid.attn""" in key] __lowercase = renew_vae_attention_paths(lowerCamelCase ) __lowercase = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase ) conv_attn_to_linear(lowerCamelCase ) return new_checkpoint def snake_case ( lowerCamelCase , lowerCamelCase , ): '''simple docstring''' __lowercase = requests.get( """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" ) __lowercase = io.BytesIO(r.content ) __lowercase = OmegaConf.load(lowerCamelCase ) __lowercase = 512 __lowercase = """cuda""" if torch.cuda.is_available() else """cpu""" if checkpoint_path.endswith("""safetensors""" ): from safetensors import safe_open __lowercase = {} with safe_open(lowerCamelCase , framework="""pt""" , device="""cpu""" ) as f: for key in f.keys(): __lowercase = f.get_tensor(lowerCamelCase ) else: __lowercase = torch.load(lowerCamelCase , map_location=lowerCamelCase )["""state_dict"""] # Convert the VAE model. __lowercase = create_vae_diffusers_config(lowerCamelCase , image_size=lowerCamelCase ) __lowercase = custom_convert_ldm_vae_checkpoint(lowerCamelCase , lowerCamelCase ) __lowercase = AutoencoderKL(**lowerCamelCase ) vae.load_state_dict(lowerCamelCase ) vae.save_pretrained(lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") __UpperCamelCase : Tuple = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
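# --- Example invocation (added): the flags match the argparse definition above;
# the script filename and paths are placeholder assumptions. Both .pt/.ckpt and
# .safetensors checkpoints are handled by the loading branch above.
# python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae-diffusers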
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = set() __lowercase = [] def parse_line(lowerCamelCase ): for line in fp: if isinstance(lowerCamelCase , lowerCamelCase ): __lowercase = line.decode("""UTF-8""" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(""" """ ): # process a single warning and move it to `selected_warnings`. if len(lowerCamelCase ) > 0: __lowercase = """\n""".join(lowerCamelCase ) # Only keep the warnings specified in `targets` if any(F': {x}: ' in warning for x in targets ): selected_warnings.add(lowerCamelCase ) buffer.clear() continue else: __lowercase = line.strip() buffer.append(lowerCamelCase ) if from_gh: for filename in os.listdir(lowerCamelCase ): __lowercase = os.path.join(lowerCamelCase , lowerCamelCase ) if not os.path.isdir(lowerCamelCase ): # read the file if filename != "warnings.txt": continue with open(lowerCamelCase ) as fp: parse_line(lowerCamelCase ) else: try: with zipfile.ZipFile(lowerCamelCase ) as z: for filename in z.namelist(): if not os.path.isdir(lowerCamelCase ): # read the file if filename != "warnings.txt": continue with z.open(lowerCamelCase ) as fp: parse_line(lowerCamelCase ) except Exception: logger.warning( F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' ) return selected_warnings def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = set() __lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) ) return selected_warnings if __name__ == "__main__": def snake_case ( lowerCamelCase ): '''simple docstring''' return values.split(""",""" ) __UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) __UpperCamelCase : List[str] = parser.parse_args() __UpperCamelCase : Union[str, Any] = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links __UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in 
enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts __UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets) __UpperCamelCase : Any = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
import re


def indian_phone_validator(phone: str) -> bool:
    """Return True if ``phone`` is a valid Indian mobile number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of ``number`` up to ``number_of_terms`` terms."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
def check_bouncy(n: int) -> bool:
    """Return True if ``n`` is bouncy, i.e. its digits are neither in increasing
    nor in decreasing order."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers
    reaches ``percent``."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __UpperCamelCase : Tuple = { """configuration_swiftformer""": [ """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwiftFormerConfig""", """SwiftFormerOnnxConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = [ """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwiftFormerForImageClassification""", """SwiftFormerModel""", """SwiftFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : List[Any] = { """configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""], """configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = ["""MaskFormerFeatureExtractor"""] __UpperCamelCase : Union[str, Any] = ["""MaskFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ """MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """MaskFormerForInstanceSegmentation""", """MaskFormerModel""", """MaskFormerPreTrainedModel""", ] __UpperCamelCase : Any = [ """MaskFormerSwinBackbone""", """MaskFormerSwinModel""", """MaskFormerSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
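# --- Usage sketch (added): with the _LazyModule pattern above, importing the
# package only records _import_structure; the heavy torch/vision imports run on
# first attribute access. MaskFormerConfig below resolves lazily.
from transformers import MaskFormerConfig

config = MaskFormerConfig()
print(type(config).__name__)  # MaskFormerConfig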
53
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
    def _a ( self : Union[str, Any] ) -> Any:
        """simple docstring"""
        super().tearDown()
        gc.collect()

    def _a ( self : List[str] ) -> int:
        """simple docstring"""
        __lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
        __lowercase = """A painting of a squirrel eating a burger"""
        __lowercase = jax.device_count()
        __lowercase = num_samples * [prompt]

        __lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
        __lowercase = replicate(_lowerCAmelCase )
        __lowercase = shard(_lowerCAmelCase )

        __lowercase = jax.random.PRNGKey(0 )
        __lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )

        __lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        __lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        __lowercase = images[0, 253:256, 253:256, -1]

        __lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
        print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2

    def _a ( self : str ) -> List[Any]:
        """simple docstring"""
        __lowercase = """stabilityai/stable-diffusion-2"""
        __lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
        __lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            _lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
        __lowercase = scheduler_params

        __lowercase = """A painting of a squirrel eating a burger"""
        __lowercase = jax.device_count()
        __lowercase = num_samples * [prompt]

        __lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
        __lowercase = replicate(_lowerCAmelCase )
        __lowercase = shard(_lowerCAmelCase )

        __lowercase = jax.random.PRNGKey(0 )
        __lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )

        __lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        __lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        __lowercase = images[0, 253:256, 253:256, -1]

        __lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
        print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
53
1
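The two tests in the row above fan a prompt batch out across JAX devices with replicate/shard before invoking the pipeline under a jit flag. A minimal sketch of that data-parallel pattern in plain JAX, runnable even on a single CPU device; the doubling function and batch size are arbitrary stand-ins, not anything from diffusers.

import jax
import jax.numpy as jnp

n_devices = jax.local_device_count()
batch = jnp.arange(n_devices * 4.0)

# shard: give each device a leading slice of the batch, shape (n_devices, 4)
sharded = batch.reshape(n_devices, -1)

# pmap runs the function once per device on its slice
doubled = jax.pmap(lambda x: x * 2)(sharded)
print(doubled.shape)  # (n_devices, 4)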
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__UpperCamelCase : List[Any] = logging.get_logger(__name__)

__UpperCamelCase : Dict = {
    """google/vivit-b-16x2-kinetics400""": (
        """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class __UpperCamelCase ( _lowerCAmelCase ):
    __snake_case :Dict = 'vivit'

    def __init__( self : str , _lowerCAmelCase : Any=224 , _lowerCAmelCase : int=32 , _lowerCAmelCase : Dict=[2, 16, 16] , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : str=768 , _lowerCAmelCase : Optional[int]=12 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Tuple=3072 , _lowerCAmelCase : Union[str, Any]="gelu_fast" , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : Union[str, Any]=1e-06 , _lowerCAmelCase : Optional[int]=True , **_lowerCAmelCase : int , ) -> str:
        """simple docstring"""
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = initializer_range
        __lowercase = layer_norm_eps

        __lowercase = image_size
        __lowercase = num_frames
        __lowercase = tubelet_size
        __lowercase = num_channels
        __lowercase = qkv_bias

        super().__init__(**_lowerCAmelCase )
53
import heapq
import sys

import numpy as np

__UpperCamelCase : List[str] = tuple[int, int]


class __UpperCamelCase :
    def __init__( self : Optional[int] ) -> Dict:
        """simple docstring"""
        __lowercase = []
        __lowercase = set()

    def _a ( self : int ) -> List[Any]:
        """simple docstring"""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("""inf""" )

    def _a ( self : Dict ) -> Optional[Any]:
        """simple docstring"""
        return len(self.elements ) == 0

    def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
        """simple docstring"""
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(_lowerCAmelCase )
        else:
            # update
            # print("update", item)
            __lowercase = []
            ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )

    def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict:
        """simple docstring"""
        if item in self.set:
            self.set.remove(_lowerCAmelCase )
            __lowercase = []
            ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )

    def _a ( self : Any ) -> List[Any]:
        """simple docstring"""
        return self.elements[0][1]

    def _a ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
        self.set.remove(_lowerCAmelCase )
        return (priority, item)


def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = np.array(lowerCamelCase )
    __lowercase = np.array(lowerCamelCase )
    return np.linalg.norm(a - b )


def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t


def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
    return ans


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = np.chararray((n, n) )
    for i in range(lowerCamelCase ):
        for j in range(lowerCamelCase ):
            __lowercase = """*"""

    for i in range(lowerCamelCase ):
        for j in range(lowerCamelCase ):
            if (j, (n - 1) - i) in blocks:
                __lowercase = """#"""

    __lowercase = """-"""
    __lowercase = back_pointer[goal]
    while x != start:
        ((__lowercase) , (__lowercase)) = x
        # print(x)
        __lowercase = """-"""
        __lowercase = back_pointer[x]
    __lowercase = """-"""

    for i in range(lowerCamelCase ):
        for j in range(lowerCamelCase ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=""" """ )
                print("""<-- End position""" , end=""" """ )
            else:
                print(grid[i][j] , end=""" """ )
        print()
    print("""^""" )
    print("""Start position""" )
    print()
    print("""# is an obstacle""" )
    print("""- is the path taken by algorithm""" )
    print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
    __lowercase = back_pointer[goal]
    while x != start:
        print(lowerCamelCase , end=""" """ )
        __lowercase = back_pointer[x]
    print(lowerCamelCase )
    sys.exit()


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
    '''simple docstring'''
    for itera in range(lowerCamelCase ):
        open_list[itera].remove_element(lowerCamelCase )
    # print("s", s)
    # print("j", j)
    ((__lowercase) , (__lowercase)) = s
    __lowercase = (x - 1, y)
    __lowercase = (x + 1, y)
    __lowercase = (x, y + 1)
    __lowercase = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(lowerCAmelCase ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(lowerCamelCase )
                __lowercase = -1
                __lowercase = float("""inf""" )

            if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
                __lowercase = g_function[s] + 1
                __lowercase = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
                if neighbours not in close_list_inad:
                    for var in range(1 , lowerCamelCase ):
                        if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
                            lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
                            open_list[j].put(
                                lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )


def snake_case ( ):
    '''simple docstring'''
    __lowercase = []
    for x in range(1 , 5 ):
        for y in range(1 , 6 ):
            some_list.append((x, y) )

    for x in range(15 , 20 ):
        some_list.append((x, 17) )

    for x in range(10 , 19 ):
        for y in range(1 , 15 ):
            some_list.append((x, y) )

    # L block
    for x in range(1 , 4 ):
        for y in range(12 , 19 ):
            some_list.append((x, y) )
    for x in range(3 , 13 ):
        for y in range(16 , 19 ):
            some_list.append((x, y) )

    return some_list


__UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}

__UpperCamelCase : Optional[Any] = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
__UpperCamelCase : Optional[Any] = make_common_ground()

__UpperCamelCase : Dict = blocks_blk
# hyper parameters
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Optional[int] = 20
__UpperCamelCase : List[str] = 3  # one consistent and two other inconsistent

# start and end destination
__UpperCamelCase : str = (0, 0)
__UpperCamelCase : str = (n - 1, n - 1)

__UpperCamelCase : Optional[Any] = 1


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = {start: 0, goal: float("""inf""" )}
    __lowercase = {start: -1, goal: -1}
    __lowercase = []
    __lowercase = set()

    for i in range(lowerCamelCase ):
        open_list.append(PriorityQueue() )
        open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )

    __lowercase = []
    __lowercase = []
    while open_list[0].minkey() < float("""inf""" ):
        for i in range(1 , lowerCamelCase ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("""inf""" ):
                        do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
                else:
                    __lowercase , __lowercase = open_list[i].top_show()
                    visited.add(lowerCamelCase )
                    expand_state(
                        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
                    close_list_inad.append(lowerCamelCase )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("""inf""" ):
                        do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
                else:
                    __lowercase = open_list[0].top_show()
                    visited.add(lowerCamelCase )
                    expand_state(
                        lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
                    close_list_anchor.append(lowerCamelCase )
    print("""No path found to goal""" )
    print()
    for i in range(n - 1 , -1 , -1 ):
        for j in range(lowerCamelCase ):
            if (j, i) in blocks:
                print("""#""" , end=""" """ )
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("""*""" , end=""" """ )
                else:
                    print("""-""" , end=""" """ )
            else:
                print("""*""" , end=""" """ )
            if (j, i) == (n - 1, n - 1):
                print("""<-- End position""" , end=""" """ )
        print()
    print("""^""" )
    print("""Start position""" )
    print()
    print("""# is an obstacle""" )
    print("""- is the path taken by algorithm""" )


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
53
1
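The multi-heuristic A* sample above orders each open list by a weighted key, f(s) = g(s) + W1 * h_i(s). A tiny standalone illustration of that computation; the function names, weight, and sample state below are ours, not the script's own data.

from math import hypot

W1 = 1.0

def euclidean(p, goal):
    # straight-line distance, one consistent heuristic choice
    return hypot(p[0] - goal[0], p[1] - goal[1])

def key(state, g_value, heuristic, goal):
    # priority used when pushing `state` onto an open list
    return g_value + W1 * heuristic(state, goal)

print(key((0, 0), g_value=3, heuristic=euclidean, goal=(3, 4)))  # 3 + 5.0 = 8.0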
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__UpperCamelCase : Dict = logging.get_logger(__name__)

__UpperCamelCase : List[Any] = {
    """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}


class __UpperCamelCase ( _lowerCAmelCase ):
    __snake_case :List[Any] = 'git_vision_model'

    def __init__( self : Tuple , _lowerCAmelCase : Optional[Any]=768 , _lowerCAmelCase : Union[str, Any]=3072 , _lowerCAmelCase : Optional[int]=12 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : List[str]=224 , _lowerCAmelCase : List[Any]=16 , _lowerCAmelCase : Tuple="quick_gelu" , _lowerCAmelCase : Any=1e-5 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Dict=0.02 , **_lowerCAmelCase : List[str] , ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(**_lowerCAmelCase )

        __lowercase = hidden_size
        __lowercase = intermediate_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = num_channels
        __lowercase = patch_size
        __lowercase = image_size
        __lowercase = initializer_range
        __lowercase = attention_dropout
        __lowercase = layer_norm_eps
        __lowercase = hidden_act

    @classmethod
    def _a ( cls : Optional[int] , _lowerCAmelCase : Union[str, os.PathLike] , **_lowerCAmelCase : int ) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(_lowerCAmelCase )

        __lowercase , __lowercase = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("""model_type""" ) == "git":
            __lowercase = config_dict["""vision_config"""]

        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )


class __UpperCamelCase ( _lowerCAmelCase ):
    __snake_case :Dict = 'git'

    def __init__( self : Optional[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[Any]=3_0522 , _lowerCAmelCase : Any=768 , _lowerCAmelCase : Any=6 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Any=3072 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Any=1024 , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : int=1e-12 , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : Optional[Any]="absolute" , _lowerCAmelCase : str=True , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : str=101 , _lowerCAmelCase : Dict=102 , _lowerCAmelCase : str=None , **_lowerCAmelCase : int , ) -> str:
        """simple docstring"""
        super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )

        if vision_config is None:
            __lowercase = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )

        __lowercase = GitVisionConfig(**_lowerCAmelCase )

        __lowercase = vocab_size
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = hidden_act
        __lowercase = intermediate_size
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = max_position_embeddings
        __lowercase = initializer_range
        __lowercase = layer_norm_eps
        __lowercase = position_embedding_type
        __lowercase = use_cache
        __lowercase = tie_word_embeddings
        __lowercase = num_image_with_embedding

        __lowercase = bos_token_id
        __lowercase = eos_token_id

    def _a ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        __lowercase = copy.deepcopy(self.__dict__ )
        __lowercase = self.vision_config.to_dict()
        __lowercase = self.__class__.model_type
        return output
53
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : Optional[int] = logging.get_logger(__name__) def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) __lowercase = MaskFormerConfig(backbone_config=lowerCamelCase ) __lowercase = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok __lowercase = 847 __lowercase = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok __lowercase = 150 __lowercase = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok __lowercase = 171 __lowercase = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO __lowercase = 133 __lowercase = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok __lowercase = 19 __lowercase = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok __lowercase = 65 __lowercase = """mapillary-vistas-id2label.json""" __lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) ) __lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()} return config def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', 
F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') ) rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') ) 
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') ) # cross-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') ) # MLP 1 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') ) # MLP 2 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') ) # layernorm 3 (final layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') ) rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') ) # fmt: on return rename_keys def snake_case ( lowerCamelCase , lowerCamelCase , 
lowerCamelCase ): '''simple docstring''' __lowercase = dct.pop(lowerCamelCase ) __lowercase = val def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __lowercase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' ) __lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[:dim, :] __lowercase = in_proj_bias[: dim] __lowercase = in_proj_weight[ dim : dim * 2, : ] __lowercase = in_proj_bias[ dim : dim * 2 ] __lowercase = in_proj_weight[ -dim :, : ] __lowercase = in_proj_bias[-dim :] # fmt: on def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' ) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[: hidden_size, :] __lowercase = in_proj_bias[:config.hidden_size] __lowercase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowercase = in_proj_bias[hidden_size : hidden_size * 2] __lowercase = in_proj_weight[-hidden_size :, :] __lowercase = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' ) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[: hidden_size, :] __lowercase = in_proj_bias[:config.hidden_size] __lowercase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowercase = in_proj_bias[hidden_size : hidden_size * 2] __lowercase = in_proj_weight[-hidden_size :, :] __lowercase = in_proj_bias[-hidden_size :] # fmt: on def snake_case ( ): '''simple docstring''' __lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return im @torch.no_grad() def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ): '''simple docstring''' __lowercase = get_maskformer_config(lowerCamelCase ) # load original state_dict with open(lowerCamelCase , """rb""" ) as f: __lowercase = pickle.load(lowerCamelCase ) __lowercase = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys __lowercase = create_rename_keys(lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase ) read_in_swin_q_k_v(lowerCamelCase , config.backbone_config ) read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase 
) # update to torch tensors for key, value in state_dict.items(): __lowercase = torch.from_numpy(lowerCamelCase ) # load 🤗 model __lowercase = MaskFormerForInstanceSegmentation(lowerCamelCase ) model.eval() for name, param in model.named_parameters(): print(lowerCamelCase , param.shape ) __lowercase , __lowercase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}' # verify results __lowercase = prepare_img() if "vistas" in model_name: __lowercase = 65 elif "cityscapes" in model_name: __lowercase = 65_535 else: __lowercase = 255 __lowercase = True if """ade""" in model_name else False __lowercase = MaskFormerImageProcessor(ignore_index=lowerCamelCase , reduce_labels=lowerCamelCase ) __lowercase = image_processor(lowerCamelCase , return_tensors="""pt""" ) __lowercase = model(**lowerCamelCase ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": __lowercase = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'Saving model and image processor to {pytorch_dump_folder_path}' ) Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) image_processor.save_pretrained(lowerCamelCase ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(F'nielsr/{model_name}' ) image_processor.push_to_hub(F'nielsr/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCamelCase : List[Any] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
53
1
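The conversion script in this row migrates checkpoints by popping each old state-dict key and reinserting its tensor under the new name. The mechanism in isolation, with made-up keys standing in for the real rename table:

# Minimal sketch of the rename mechanism the conversion script uses: pop the
# old key, write the value back under the new key.
state_dict = {"backbone.patch_embed.proj.weight": [1, 2, 3]}
rename_keys = [("backbone.patch_embed.proj.weight", "encoder.projection.weight")]

for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)

print(state_dict)  # {'encoder.projection.weight': [1, 2, 3]}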
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__UpperCamelCase : Optional[int] = {
    """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase : Dict = [
        """PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PegasusXForConditionalGeneration""",
        """PegasusXModel""",
        """PegasusXPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    __UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
53
from math import sqrt


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(lowerCamelCase ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def snake_case ( lowerCamelCase = 10_001 ):
    '''simple docstring'''
    __lowercase = 0
    __lowercase = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(lowerCamelCase ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(lowerCamelCase ):
            count += 1
    return number


if __name__ == "__main__":
    print(F'''{solution() = }''')
53
1
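A standalone numeric sanity check of the 6k ± 1 trial-division idea used in the sample above, re-implemented here because the dump binds both of its functions to the same obfuscated name. For reference, Project Euler 7's answer (the 10001st prime, the default nth) is 104743.

from math import isqrt

def is_prime(number: int) -> bool:
    if number in (2, 3):
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # every remaining prime candidate has the form 6k - 1 or 6k + 1
    for i in range(5, isqrt(number) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

print([n for n in range(2, 30) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]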
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__UpperCamelCase : Optional[Any] = {
    """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
    """processing_git""": ["""GitProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase : Tuple = [
        """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GitForCausalLM""",
        """GitModel""",
        """GitPreTrainedModel""",
        """GitVisionModel""",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
53
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def snake_case ( lowerCamelCase ): '''simple docstring''' if isinstance(lowerCamelCase , collections.abc.Iterable ): return x return (x, x) @require_tf class __UpperCamelCase : def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : Dict ) -> Optional[int]: """simple docstring""" pass def _a ( self : Any ) -> Optional[Any]: """simple docstring""" pass def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str: """simple docstring""" __lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) 
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) __lowercase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) __lowercase = after_output[0].numpy() __lowercase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase , 1e-5 ) def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model( input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase ) __lowercase = output.vision_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase = to_atuple(vision_model.config.image_size ) __lowercase = to_atuple(vision_model.config.patch_size ) __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase = output.text_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]: """simple docstring""" __lowercase = np.abs((a - b) ).max() self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' 
) def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_lowerCAmelCase ) def _a ( self : int ) -> List[Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> int: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase ) def _a ( self : List[str] ) -> str: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_save_load(**_lowerCAmelCase ) def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_lowerCAmelCase ) @slow def _a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase , __lowercase = self.get_pretrained_model_and_inputs() __lowercase = model_a(**_lowerCAmelCase ) __lowercase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ) __lowercase = model_a(**_lowerCAmelCase ) __lowercase = after_outputs[0].numpy() __lowercase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase , 1e-5 ) @require_tf class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): def _a ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) __lowercase = 13 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" __lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" ) __lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def _a ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase = TFViTModelTester(self ) __lowercase = TFBertModelTester(self ) __lowercase = vit_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): def _a ( self : Tuple ) -> Any: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" ) __lowercase = 13 
__lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model( input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase ) __lowercase = output.vision_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __lowercase = to_atuple(vision_model.config.image_size ) __lowercase = to_atuple(vision_model.config.patch_size ) __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase = output.text_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int: """simple docstring""" __lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" ) __lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def _a ( self : Tuple ) -> str: """simple docstring""" __lowercase = TFDeiTModelTester(self ) __lowercase = TFRobertaModelTester(self ) __lowercase = vit_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): def _a ( self : int ) -> Union[str, Any]: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" ) __lowercase = 13 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {"""pixel_values""": pixel_values, 
"""input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" ) __lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase = TFCLIPVisionModelTester(self ) __lowercase = TFBertModelTester(self ) __lowercase = clip_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class __UpperCamelCase ( unittest.TestCase ): @slow def _a ( self : int ) -> Tuple: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_pretrained( """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase ) __lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __lowercase = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" ) __lowercase = model(**_lowerCAmelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowercase = np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
53
1
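The dual-encoder tests above repeatedly assert one shape contract: text and image embeddings share a projection space, and logits_per_image is a scaled similarity matrix of shape (n_images, n_texts). A pure-numpy sketch of that contract; dimensions and the unit logit scale are arbitrary stand-ins, not values from the models under test.

import numpy as np

rng = np.random.default_rng(0)
image_embeds = rng.normal(size=(2, 64))   # 2 images, projection_dim = 64
text_embeds = rng.normal(size=(3, 64))    # 3 captions

# normalize, then take a cosine-similarity matrix scaled by a logit scale
image_embeds /= np.linalg.norm(image_embeds, axis=-1, keepdims=True)
text_embeds /= np.linalg.norm(text_embeds, axis=-1, keepdims=True)
logit_scale = 1.0

logits_per_image = logit_scale * image_embeds @ text_embeds.T
print(logits_per_image.shape)  # (2, 3): (n_images, n_texts), as the tests assert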
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    if (ksize % 2) == 0:
        __lowercase = ksize + 1
    __lowercase = np.zeros((ksize, ksize) , dtype=np.floataa )

    # each value
    for y in range(lowerCamelCase ):
        for x in range(lowerCamelCase ):
            # distance from center
            __lowercase = x - ksize // 2
            __lowercase = y - ksize // 2

            # degree to radiant
            __lowercase = theta / 180 * np.pi
            __lowercase = np.cos(_theta )
            __lowercase = np.sin(_theta )

            # get kernel x
            __lowercase = cos_theta * px + sin_theta * py

            # get kernel y
            __lowercase = -sin_theta * px + cos_theta * py

            # fill kernel
            __lowercase = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    __UpperCamelCase : List[Any] = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    __UpperCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    __UpperCamelCase : Union[str, Any] = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        __UpperCamelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel_aa)
    __UpperCamelCase : List[str] = out / out.max() * 255
    __UpperCamelCase : List[str] = out.astype(np.uinta)

    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
    waitKey(0)
53
from __future__ import annotations

from scipy.special import comb  # type: ignore


class __UpperCamelCase :
    def __init__( self : int , _lowerCAmelCase : list[tuple[float, float]] ) -> Any:
        """simple docstring"""
        __lowercase = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        __lowercase = len(_lowerCAmelCase ) - 1

    def _a ( self : Tuple , _lowerCAmelCase : float ) -> list[float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        __lowercase = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , _lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(_lowerCAmelCase ) , 5 ) == 1
        return output_values

    def _a ( self : List[str] , _lowerCAmelCase : float ) -> tuple[float, float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        __lowercase = self.basis_function(_lowerCAmelCase )
        __lowercase = 0.0
        __lowercase = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def _a ( self : Optional[int] , _lowerCAmelCase : float = 0.01 ) -> Union[str, Any]:
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore

        __lowercase = []  # x coordinates of points to plot
        __lowercase = []  # y coordinates of points to plot

        __lowercase = 0.0
        while t <= 1:
            __lowercase = self.bezier_curve_function(_lowerCAmelCase )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size

        __lowercase = [i[0] for i in self.list_of_points]
        __lowercase = [i[1] for i in self.list_of_points]

        plt.plot(
            _lowerCAmelCase , _lowerCAmelCase , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(_lowerCAmelCase , _lowerCAmelCase , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
53
1
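A hand check of the Bernstein-basis evaluation the Bezier sample above performs: for a degree-1 curve the point at t is plain linear interpolation. The helper below is ours, written against scipy.special.comb like the sample.

from scipy.special import comb

def bezier_point(points, t):
    # basis[i] = C(n, i) * (1 - t)^(n - i) * t^i, the Bernstein polynomials
    n = len(points) - 1
    basis = [comb(n, i) * (1 - t) ** (n - i) * t**i for i in range(n + 1)]
    x = sum(b * p[0] for b, p in zip(basis, points))
    y = sum(b * p[1] for b, p in zip(basis, points))
    return (x, y)

print(bezier_point([(1, 2), (3, 5)], 0.5))  # (2.0, 3.5), the midpoint of the segment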
from sklearn.metrics import recall_score import datasets __UpperCamelCase : Optional[Any] = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ __UpperCamelCase : Any = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ __UpperCamelCase : Any = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def _a ( self : Dict ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def _a ( self : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : str=1 , _lowerCAmelCase : Optional[int]="binary" , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Union[str, Any]="warn" , ) -> Optional[Any]: """simple docstring""" __lowercase = recall_score( _lowerCAmelCase , _lowerCAmelCase , labels=_lowerCAmelCase , pos_label=_lowerCAmelCase , average=_lowerCAmelCase , sample_weight=_lowerCAmelCase , zero_division=_lowerCAmelCase , ) return {"recall": float(_lowerCAmelCase ) if score.size == 1 else score}
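The equation Recall = TP / (TP + FN) from the description can be checked directly against scikit-learn for the first docstring example; a minimal sketch (the hand counts in the comments are for illustration only):

from sklearn.metrics import recall_score

refs = [0, 0, 1, 1, 1]
preds = [0, 1, 0, 1, 1]
tp = sum(r == 1 and p == 1 for r, p in zip(refs, preds))  # 2 true positives
fn = sum(r == 1 and p == 0 for r, p in zip(refs, preds))  # 1 false negative
assert abs(recall_score(refs, preds) - tp / (tp + fn)) < 1e-12  # 2/3 = 0.666...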
53
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class __UpperCamelCase : def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = is_training __lowercase = use_labels __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = encoder_stride __lowercase = num_attention_outputs __lowercase = embed_dim __lowercase = embed_dim + 1 __lowercase = resolution __lowercase = depths __lowercase = hidden_sizes __lowercase = dim __lowercase = mlp_expansion_ratio def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = self.get_config() return config, pixel_values, labels def _a ( self : Optional[Any] ) -> str: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def _a ( self 
: Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = TFEfficientFormerModel(config=_lowerCAmelCase ) __lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" __lowercase = self.type_sequence_label_size __lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase ) __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase = 1 __lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase ) __lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): __snake_case :Any = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) __snake_case :Any = ( { 'feature-extraction': TFEfficientFormerModel, 'image-classification': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) __snake_case :int = False __snake_case :Optional[int] = False __snake_case :int = False __snake_case :Any = False __snake_case :Any = False def _a ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase = TFEfficientFormerModelTester(self ) __lowercase = ConfigTester( self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 ) def _a ( self : Optional[int] ) -> int: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" ) def _a ( self : Optional[int] ) -> Any: """simple docstring""" pass @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" ) def _a ( self : int ) -> str: """simple docstring""" pass def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(_lowerCAmelCase ) __lowercase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> Dict: """simple docstring""" def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ): __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = 
outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowercase = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) if hasattr(self.model_tester , """encoder_seq_length""" ): __lowercase = self.model_tester.encoder_seq_length if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1: __lowercase = seq_length * self.model_tester.chunk_length else: __lowercase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __lowercase = outputs.decoder_hidden_states self.assertIsInstance(_lowerCAmelCase , (list, tuple) ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict: """simple docstring""" __lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _a ( self : int ) -> int: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" ) def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase ) def _a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def _a ( self : List[str] ) -> List[Any]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def _a ( self : Any ) -> List[str]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase ) if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ): __lowercase = encoder_seq_length * self.model_tester.num_hashes 
for model_class in self.all_model_classes: __lowercase = True __lowercase = False __lowercase = True __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowercase = True __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def _a ( self : Dict ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __lowercase = model_class(_lowerCAmelCase ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __lowercase = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase ) for key, val in model.input_signature.items() if key in model.dummy_inputs } __lowercase = model(_lowerCAmelCase ) self.assertTrue(outputs_dict is not None ) def snake_case ( ): '''simple docstring''' __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def _a ( self : Optional[Any] ) -> Any: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" ) if is_vision_available() else None ) @slow def _a ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" ) # forward pass __lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase ) # verify the logits __lowercase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) __lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) ) @slow def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" __lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( """snap-research/efficientformer-l1-300""" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" ) # forward pass __lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase ) # verify the logits __lowercase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) __lowercase = 
tf.constant([-0.1_312, 0.4_353, -1.0_499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
53
1
from __future__ import annotations def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' print(F'Vertex\tShortest Distance from vertex {src}' ) for i, d in enumerate(lowerCamelCase ): print(F'{i}\t\t{d}' ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' for j in range(lowerCamelCase ): __lowercase , __lowercase , __lowercase = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = [float("""inf""" )] * vertex_count __lowercase = 0.0 for _ in range(vertex_count - 1 ): for j in range(lowerCamelCase ): __lowercase , __lowercase , __lowercase = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: __lowercase = distance[u] + w __lowercase = check_negative_cycle(lowerCamelCase , lowerCamelCase , lowerCamelCase ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __UpperCamelCase : Optional[Any] = int(input("""Enter number of vertices: """).strip()) __UpperCamelCase : Tuple = int(input("""Enter number of edges: """).strip()) __UpperCamelCase : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("""Edge """, i + 1) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = ( int(x) for x in input("""Enter source, destination, weight: """).strip().split(""" """) ) __UpperCamelCase : Tuple = {"""src""": src, """dst""": dest, """weight""": weight} __UpperCamelCase : Any = int(input("""\nEnter shortest path source:""").strip()) __UpperCamelCase : List[Any] = bellman_ford(graph, V, E, source) print_distance(shortest_distance, source)
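For a non-interactive run, the same routine can be exercised on a hard-coded graph. A minimal sketch, assuming the module above is importable and that the positional arguments are (graph, vertex_count, edge_count, src) as in the __main__ block:

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
# The direct edge 0 -> 1 costs 4, but the path 0 -> 2 -> 1 costs only 3.
distances = bellman_ford(edges, 3, 3, 0)
assert distances == [0.0, 3.0, 1.0]
print_distance(distances, 0)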
53
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __UpperCamelCase : Tuple = 2 class __UpperCamelCase : def __init__( self : List[str] , *, # begin keyword-only arguments _lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]=None , ) -> Tuple: """simple docstring""" __lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos __lowercase = [] __lowercase = [] __lowercase = {} __lowercase = self.add_symbol(_lowerCAmelCase ) __lowercase = self.add_symbol(_lowerCAmelCase ) __lowercase = self.add_symbol(_lowerCAmelCase ) __lowercase = self.add_symbol(_lowerCAmelCase ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(_lowerCAmelCase ) __lowercase = len(self.symbols ) def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any: """simple docstring""" return self.indices == other.indices def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict: """simple docstring""" if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : str ) -> List[str]: """simple docstring""" return len(self.symbols ) def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]: """simple docstring""" return sym in self.indices @classmethod def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase = cls() d.add_from_file(_lowerCAmelCase ) return d def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]: """simple docstring""" if word in self.indices and not overwrite: __lowercase = self.indices[word] __lowercase = self.count[idx] + n return idx else: __lowercase = len(self.symbols ) __lowercase = idx self.symbols.append(_lowerCAmelCase ) self.count.append(_lowerCAmelCase ) return idx def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]: """simple docstring""" return 0 def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str: """simple docstring""" if isinstance(_lowerCAmelCase , _lowerCAmelCase ): try: with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(_lowerCAmelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) ) return __lowercase = f.readlines() __lowercase = self._load_meta(_lowerCAmelCase ) for line in lines[indices_start_line:]: try: __lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 ) if field == "#fairseq:overwrite": __lowercase = True __lowercase , __lowercase = line.rsplit(""" """ , 1 ) else: __lowercase = False __lowercase = int(_lowerCAmelCase ) __lowercase = line if word in self and not overwrite: raise RuntimeError( """Duplicate word found when loading Dictionary: '{}'. """ """Duplicate words can overwrite earlier ones by adding the """ """#fairseq:overwrite flag at the end of the corresponding row """ """in the dictionary file. 
If using the Camembert model, please """ """download an updated copy of the model file.""".format(_lowerCAmelCase ) ) self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase ) except ValueError: raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" ) def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() ) __lowercase = """<s> <pad> </s> <unk>""".split() # restore the special tokens for k in keep_keys: del da[F'{k}</w>'] __lowercase = d[k] # restore return da def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' if not os.path.exists(lowerCamelCase ): raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' ) os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase ) print(F'Writing results to {pytorch_dump_folder_path}' ) # handle various types of models __lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {checkpoint_file} does not exist!' ) __lowercase = torch.load(lowerCamelCase , map_location="""cpu""" ) __lowercase = chkpt["""cfg"""]["""model"""] # dicts __lowercase = os.path.join(lowerCamelCase , """dict.txt""" ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {dict_file} does not exist!' ) __lowercase = Dictionary.load(lowerCamelCase ) __lowercase = rewrite_dict_keys(src_dict.indices ) __lowercase = len(lowerCamelCase ) __lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] ) print(F'Generating {src_vocab_file} of {src_vocab_size} records' ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) ) # merges_file (bpecodes) __lowercase = os.path.join(lowerCamelCase , """bpecodes""" ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {bpecodes_file} does not exist!' 
) __lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] ) shutil.copyfile(lowerCamelCase , lowerCamelCase ) # model config __lowercase = os.path.join(lowerCamelCase , """config.json""" ) __lowercase = { """activation_dropout""": args["""activation_dropout"""], """architectures""": ["""BioGptForCausalLM"""], """attention_probs_dropout_prob""": args["""attention_dropout"""], """bos_token_id""": 0, """eos_token_id""": 2, """hidden_act""": args["""activation_fn"""], """hidden_dropout_prob""": args["""dropout"""], """hidden_size""": args["""decoder_embed_dim"""], """initializer_range""": 0.02, """intermediate_size""": args["""decoder_ffn_embed_dim"""], """layer_norm_eps""": 1e-12, """layerdrop""": args["""decoder_layerdrop"""], """max_position_embeddings""": args["""max_target_positions"""], """model_type""": """biogpt""", """num_attention_heads""": args["""decoder_attention_heads"""], """num_hidden_layers""": args["""decoder_layers"""], """pad_token_id""": 1, """scale_embedding""": not args["""no_scale_embedding"""], """tie_word_embeddings""": args["""share_decoder_input_output_embed"""], """vocab_size""": src_vocab_size, } # good hparam defaults to start with print(F'Generating {biogpt_model_config_file}' ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) ) # tokenizer config __lowercase = os.path.join(lowerCamelCase , lowerCamelCase ) __lowercase = { """bos_token""": """<s>""", """eos_token""": """</s>""", """model_max_length""": 1_024, """pad_token""": """<pad>""", """special_tokens_map_file""": None, """tokenizer_class""": """BioGptTokenizer""", """unk_token""": """<unk>""", } print(F'Generating {biogpt_tokenizer_config_file}' ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) ) # model __lowercase = chkpt["""model"""] # remove unneeded keys __lowercase = [ """decoder.version""", ] for k in ignore_keys: model_state_dict.pop(lowerCamelCase , lowerCamelCase ) __lowercase = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("""output_projection.weight""" ): __lowercase = model_state_dict.pop(lowerCamelCase ) else: __lowercase = model_state_dict.pop(lowerCamelCase ) __lowercase = BioGptConfig.from_pretrained(lowerCamelCase ) __lowercase = BioGptForCausalLM(lowerCamelCase ) # check that it loads ok model_new.load_state_dict(lowerCamelCase ) # save __lowercase = os.path.join(lowerCamelCase , lowerCamelCase ) print(F'Generating {pytorch_weights_dump_path}' ) torch.save(lowerCamelCase , lowerCamelCase ) print("""Conversion is done!""" ) if __name__ == "__main__": __UpperCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--biogpt_checkpoint_path""", default=None, type=str, required=True, help=( """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,""" """ bpecodes, etc.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __UpperCamelCase : Optional[Any] = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
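A typical invocation of the conversion script above, pointed at a fairseq-style dump directory that contains checkpoint.pt, dict.txt and bpecodes. The script filename and both paths below are placeholders, not taken from the source:

# python convert_biogpt_original_checkpoint.py \
#     --biogpt_checkpoint_path /path/to/biogpt_checkpoint_dir \
#     --pytorch_dump_folder_path /path/to/output_dir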
53
1
53
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): __snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str: """simple docstring""" __lowercase = np.random.RandomState(_lowerCAmelCase ) __lowercase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self : int ) -> List[Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Tuple ) -> int: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Tuple ) 
-> Union[str, Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs["""prompt"""]] # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs.pop("""prompt""" )] __lowercase = pipe.tokenizer( _lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , ) __lowercase = text_inputs["""input_ids"""] __lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] __lowercase = prompt_embeds # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def _a ( self : int ) -> str: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = 3 * ["""this is a negative prompt"""] __lowercase = negative_prompt __lowercase = 3 * [inputs["""prompt"""]] # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs.pop("""prompt""" )] __lowercase = [] for p in [prompt, negative_prompt]: __lowercase = pipe.tokenizer( _lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , ) __lowercase = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) __lowercase , __lowercase = embeds # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): @property def _a ( self : Dict ) 
-> str: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __lowercase = ort.SessionOptions() __lowercase = False return options def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """A painting of a squirrel eating a burger""" np.random.seed(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Tuple ) -> Any: """simple docstring""" __lowercase = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """open neural network exchange""" __lowercase = np.random.RandomState(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Dict ) -> Dict: """simple docstring""" __lowercase = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """open neural network exchange""" __lowercase = np.random.RandomState(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : str ) -> List[str]: """simple docstring""" __lowercase = 0 def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None: __lowercase = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) __lowercase = latents[0, -3:, -3:, -1] 
__lowercase = np.array( [-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) __lowercase = latents[0, -3:, -3:, -1] __lowercase = np.array( [-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 __lowercase = False __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """Andromeda galaxy in a bottle""" __lowercase = np.random.RandomState(0 ) pipe( prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert pipe.safety_checker is None __lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_lowerCAmelCase ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase ) # sanity check that the pipeline still works assert pipe.safety_checker is None __lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None
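Outside the test harness, the pipeline under test is driven the same way; a minimal CPU sketch using only calls that appear in the tests above (downloading the ONNX weights is slow, so this is an illustration, not a unit test):

from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
)
image = pipe("a photo of an astronaut riding a horse", num_inference_steps=10).images[0]  # first generated image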
53
1
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = set() __lowercase = [] def parse_line(lowerCamelCase ): for line in fp: if isinstance(lowerCamelCase , lowerCamelCase ): __lowercase = line.decode("""UTF-8""" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(""" """ ): # process a single warning and move it to `selected_warnings`. if len(lowerCamelCase ) > 0: __lowercase = """\n""".join(lowerCamelCase ) # Only keep the warnings specified in `targets` if any(F': {x}: ' in warning for x in targets ): selected_warnings.add(lowerCamelCase ) buffer.clear() continue else: __lowercase = line.strip() buffer.append(lowerCamelCase ) if from_gh: for filename in os.listdir(lowerCamelCase ): __lowercase = os.path.join(lowerCamelCase , lowerCamelCase ) if not os.path.isdir(lowerCamelCase ): # read the file if filename != "warnings.txt": continue with open(lowerCamelCase ) as fp: parse_line(lowerCamelCase ) else: try: with zipfile.ZipFile(lowerCamelCase ) as z: for filename in z.namelist(): if not os.path.isdir(lowerCamelCase ): # read the file if filename != "warnings.txt": continue with z.open(lowerCamelCase ) as fp: parse_line(lowerCamelCase ) except Exception: logger.warning( F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' ) return selected_warnings def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = set() __lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) ) return selected_warnings if __name__ == "__main__": def snake_case ( lowerCamelCase ): '''simple docstring''' return values.split(""",""" ) __UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) __UpperCamelCase : List[str] = parser.parse_args() __UpperCamelCase : Union[str, Any] = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links __UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in 
enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts __UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets) __UpperCamelCase : Any = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
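The selection logic in extract_warnings_from_single_artifact keys on the ': <Category>: ' pattern that pytest writes in its warnings summary; a small illustration with a made-up warning line:

warning = "src/example.py:12: DeprecationWarning: `foo` is deprecated"
targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
assert any(f": {x}: " in warning for x in targets)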
53
def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = """""" for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key __lowercase = remove_duplicates(key.upper() ) __lowercase = len(lowerCamelCase ) # First fill cipher with key characters __lowercase = {alphabet[i]: char for i, char in enumerate(lowerCamelCase )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(lowerCamelCase ) , 26 ): __lowercase = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 __lowercase = alphabet[i - offset] __lowercase = char return cipher_alphabet def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' return "".join(cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() ) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() ) def snake_case ( ): '''simple docstring''' __lowercase = input("""Enter message to encode or decode: """ ).strip() __lowercase = input("""Enter keyword: """ ).strip() __lowercase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower() try: __lowercase = {"""e""": encipher, """d""": decipher}[option] except KeyError: raise KeyError("""invalid input option""" ) __lowercase = create_cipher_map(lowerCamelCase ) print(func(lowerCamelCase , lowerCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
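A round trip through the three helpers above using the keyword 'Goodbye!!'; non-alphabetic characters fall through cipher_map.get(ch, ch) unchanged. The expected ciphertext below is derived by hand from the mapping create_cipher_map produces for this keyword:

cipher_map = create_cipher_map("Goodbye!!")
ciphertext = encipher("Hello World!!", cipher_map)
assert ciphertext == "CYJJM VMQJB!!"
assert decipher(ciphertext, cipher_map) == "HELLO WORLD!!"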
53
1
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): __snake_case :Any = XLNetTokenizer __snake_case :int = XLNetTokenizerFast __snake_case :Union[str, Any] = True __snake_case :str = True def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowercase = XLNetTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase = """<s>""" __lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase ) def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" __lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<eod>""" ) self.assertEqual(len(_lowerCAmelCase ) , 1006 ) def _a ( self : Optional[int] ) -> List[str]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def _a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = XLNetTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase ) __lowercase = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [285, 46, 10, 170, 382] ) __lowercase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) __lowercase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) __lowercase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" __lowercase = XLNetTokenizer(_lowerCAmelCase , do_lower_case=_lowerCAmelCase ) __lowercase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCAmelCase , [ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", 
"""or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] ) def _a ( self : List[Any] ) -> Tuple: """simple docstring""" __lowercase = XLNetTokenizer(_lowerCAmelCase , do_lower_case=_lowerCAmelCase ) __lowercase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) @slow def _a ( self : Union[str, Any] ) -> Any: """simple docstring""" __lowercase = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) __lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase ) __lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase ) __lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ) __lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
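The slow test above asserts that XLNet appends its sep (id 4) and cls (id 3) tokens at the end of an encoded sequence; the same behaviour can be observed directly. A sketch that downloads the pretrained tokenizer:

from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
assert tok.encode("sequence builders")[-2:] == [4, 3]  # <sep>, <cls>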
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): __snake_case :Any = IFInpaintingPipeline __snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'} __snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'} def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return self._get_dummy_components() def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any: """simple docstring""" if str(_lowerCAmelCase ).startswith("""mps""" ): __lowercase = torch.manual_seed(_lowerCAmelCase ) else: __lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) __lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) __lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) __lowercase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _a ( self : Tuple ) -> Dict: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def _a ( self : Union[str, Any] ) -> Dict: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1e-1 ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def _a ( self : str ) -> Optional[int]: """simple docstring""" self._test_save_load_local() def _a ( self : List[str] ) -> List[Any]: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
import os
import time

import numpy as np
import onnxruntime as ort

__UpperCamelCase : List[str] = "1"
__UpperCamelCase : Any = "0"
__UpperCamelCase : Dict = "1"

# Disable graph optimizations so the benchmark measures the raw execution path.
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL

print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

# BERT-style dummy inputs: token ids, attention mask and token type ids.
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )

print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
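# A small, optional sanity check for the benchmark above (not part of the original
# script): onnxruntime exposes get_available_providers(), so the provider list can
# be validated before building the session. The CPU fallback below is illustrative.
available_providers = ort.get_available_providers()
print("Available providers:", available_providers)
if "TensorrtExecutionProvider" not in available_providers:
    # Fall back to CPU so the benchmark still runs, just without GPU acceleration.
    execution_provider = ["CPUExecutionProvider"]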
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :str = (UnCLIPScheduler,) def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple: """simple docstring""" __lowercase = { """num_train_timesteps""": 1000, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**_lowerCAmelCase ) return config def _a ( self : Dict ) -> List[Any]: """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_lowerCAmelCase ) def _a ( self : Any ) -> Any: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_lowerCAmelCase ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _a ( self : str ) -> int: """simple docstring""" for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" ) __lowercase = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5 def _a ( self : str ) -> Optional[int]: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config(variance_type="""learned_range""" ) __lowercase = scheduler_class(**_lowerCAmelCase ) __lowercase = 0.5 assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5 def _a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**_lowerCAmelCase ) __lowercase = scheduler.timesteps __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter __lowercase = torch.manual_seed(0 ) for i, t in enumerate(_lowerCAmelCase ): # 1. predict noise residual __lowercase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. 
predict previous mean of sample x_t-1 __lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample __lowercase = pred_prev_sample __lowercase = torch.sum(torch.abs(_lowerCAmelCase ) ) __lowercase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3 def _a ( self : List[str] ) -> str: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(25 ) __lowercase = scheduler.timesteps __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter __lowercase = torch.manual_seed(0 ) for i, t in enumerate(_lowerCAmelCase ): # 1. predict noise residual __lowercase = model(_lowerCAmelCase , _lowerCAmelCase ) if i + 1 == timesteps.shape[0]: __lowercase = None else: __lowercase = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 __lowercase = scheduler.step( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample __lowercase = pred_prev_sample __lowercase = torch.sum(torch.abs(_lowerCAmelCase ) ) __lowercase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3 def _a ( self : str ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : int ) -> List[str]: """simple docstring""" pass
# NOTE: the original class name was garbled; "MaxFenwickTree" is a reconstruction.
class MaxFenwickTree:
    """Fenwick-style tree answering range-maximum queries in O(log^2 n)."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Point update; assumes the value at an index never decreases."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers only this index.
                self.tree[index] = value
            else:
                # Standard Fenwick-max recurrence (reconstructed): keep the larger
                # of the stored node maximum and the new value.
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum over the half-open interval [left, right)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
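# A minimal usage sketch for the tree above (class and method names as
# reconstructed): point updates with non-decreasing values, then range-maximum
# queries over the half-open interval [left, right).
tree = MaxFenwickTree(8)
for position, value in enumerate([3, 1, 4, 1, 5, 9, 2, 6]):
    tree.update(position, value)
print(tree.query(0, 8))  # 9: maximum of the whole array
print(tree.query(0, 4))  # 4: maximum of the first four values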
import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures __UpperCamelCase : Any = logging.get_logger(__name__) @dataclass class __UpperCamelCase : __snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} ) __snake_case :str = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) __snake_case :int = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def _a ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = self.task_name.lower() class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :Optional[int] = 'train' __snake_case :int = 'dev' __snake_case :Any = 'test' class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :GlueDataTrainingArguments __snake_case :str __snake_case :List[InputFeatures] def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]: """simple docstring""" warnings.warn( """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """ """library. You can have a look at this example script for pointers: """ """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , ) __lowercase = args __lowercase = glue_processors[args.task_name]() __lowercase = glue_output_modes[args.task_name] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): try: __lowercase = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) # Load data features from cache or dataset file __lowercase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) __lowercase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) __lowercase , __lowercase = label_list[2], label_list[1] __lowercase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
__lowercase = cached_features_file + """.lock""" with FileLock(_lowerCAmelCase ): if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache: __lowercase = time.time() __lowercase = torch.load(_lowerCAmelCase ) logger.info( F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(F'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: __lowercase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: __lowercase = self.processor.get_test_examples(args.data_dir ) else: __lowercase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: __lowercase = examples[:limit_length] __lowercase = glue_convert_examples_to_features( _lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , ) __lowercase = time.time() torch.save(self.features , _lowerCAmelCase ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self : Dict ) -> Optional[int]: """simple docstring""" return len(self.features ) def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures: """simple docstring""" return self.features[i] def _a ( self : str ) -> int: """simple docstring""" return self.label_list
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __UpperCamelCase : Tuple = ( """4S 3H 2C 7S 5H""", """9D 8H 2C 6S 7H""", """2D 6D 9D TH 7D""", """TC 8C 2S JH 6C""", """JH 8S TH AH QH""", """TS KS 5S 9S AC""", """KD 6S 9D TH AD""", """KS 8D 4D 9S 4S""", # pair """8C 4S KH JS 4D""", # pair """QH 8H KD JH 8S""", # pair """KC 4H KS 2H 8D""", # pair """KD 4S KC 3H 8S""", # pair """AH 8S AS KC JH""", # pair """3H 4C 4H 3S 2H""", # 2 pairs """5S 5D 2C KH KH""", # 2 pairs """3C KH 5D 5S KH""", # 2 pairs """AS 3C KH AD KH""", # 2 pairs """7C 7S 3S 7H 5S""", # 3 of a kind """7C 7S KH 2H 7H""", # 3 of a kind """AC KH QH AH AS""", # 3 of a kind """2H 4D 3C AS 5S""", # straight (low ace) """3C 5C 4C 2C 6H""", # straight """6S 8S 7S 5H 9H""", # straight """JS QS 9H TS KH""", # straight """QC KH TS JS AH""", # straight (high ace) """8C 9C 5C 3C TC""", # flush """3S 8S 9S 5S KS""", # flush """4C 5C 9C 8C KC""", # flush """JH 8H AH KH QH""", # flush """3D 2H 3H 2C 2D""", # full house """2H 2C 3S 3H 3D""", # full house """KH KC 3S 3H 3D""", # full house """JC 6H JS JD JH""", # 4 of a kind """JC 7H JS JD JH""", # 4 of a kind """JC KH JS JD JH""", # 4 of a kind """2S AS 4S 5S 3S""", # straight flush (low ace) """2D 6D 3D 4D 5D""", # straight flush """5C 6C 3C 7C 4C""", # straight flush """JH 9H TH KH QH""", # straight flush """JH AH TH KH QH""", # royal flush (high ace straight flush) ) __UpperCamelCase : Dict = ( ("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""), ("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""), ("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""), ("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""), ("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""), ("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""), ("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""), ("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""), ("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""), ("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""), ("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""), ("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""), ("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""), ("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""), ("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""), ("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""), ("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""), ("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""), ("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""), ("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""), ("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""), ("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""), ("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""), ("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""), ("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""), ("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""), ("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""), ("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""), ("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""), ) __UpperCamelCase : Optional[int] = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", True), ("""KS AS TS QS JS""", True), ("""8H 9H 
QS JS TH""", False), ("""AS 3S 4S 8S 2S""", True), ) __UpperCamelCase : int = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", False), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", True), ) __UpperCamelCase : str = ( ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]), ("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]), ("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]), ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]), ) __UpperCamelCase : Any = ( ("""JH AH TH KH QH""", 0), ("""JH 9H TH KH QH""", 0), ("""JC KH JS JD JH""", 7), ("""KH KC 3S 3H 3D""", 6), ("""8C 9C 5C 3C TC""", 0), ("""JS QS 9H TS KH""", 0), ("""7C 7S KH 2H 7H""", 3), ("""3C KH 5D 5S KH""", 2), ("""QH 8H KD JH 8S""", 1), ("""2D 6D 9D TH 7D""", 0), ) __UpperCamelCase : Tuple = ( ("""JH AH TH KH QH""", 23), ("""JH 9H TH KH QH""", 22), ("""JC KH JS JD JH""", 21), ("""KH KC 3S 3H 3D""", 20), ("""8C 9C 5C 3C TC""", 19), ("""JS QS 9H TS KH""", 18), ("""7C 7S KH 2H 7H""", 17), ("""3C KH 5D 5S KH""", 16), ("""QH 8H KD JH 8S""", 15), ("""2D 6D 9D TH 7D""", 14), ) def snake_case ( ): '''simple docstring''' __lowercase , __lowercase = randrange(len(lowerCamelCase ) ), randrange(len(lowerCamelCase ) ) __lowercase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] __lowercase , __lowercase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def snake_case ( lowerCamelCase = 100 ): '''simple docstring''' return (generate_random_hand() for _ in range(lowerCamelCase )) @pytest.mark.parametrize("""hand, expected""" , lowerCamelCase ) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' assert PokerHand(lowerCamelCase )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , lowerCamelCase ) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' assert PokerHand(lowerCamelCase )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , lowerCamelCase ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = PokerHand(lowerCamelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , lowerCamelCase ) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' assert PokerHand(lowerCamelCase )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , lowerCamelCase ) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' assert PokerHand(lowerCamelCase )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , lowerCamelCase ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' assert PokerHand(lowerCamelCase ).compare_with(PokerHand(lowerCamelCase ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' assert PokerHand(lowerCamelCase ).compare_with(PokerHand(lowerCamelCase ) ) == expected def snake_case ( ): '''simple docstring''' __lowercase = [PokerHand(lowerCamelCase ) for hand in SORTED_HANDS] __lowercase = poker_hands.copy() shuffle(lowerCamelCase ) __lowercase = chain(sorted(lowerCamelCase ) ) for index, hand in enumerate(lowerCamelCase ): assert hand == poker_hands[index] def snake_case ( ): '''simple docstring''' __lowercase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] 
pokerhands.sort(reverse=lowerCamelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def snake_case ( ): '''simple docstring''' __lowercase = PokerHand("""2C 4S AS 3D 5C""" ) __lowercase = True __lowercase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def snake_case ( ): '''simple docstring''' __lowercase = 0 __lowercase = os.path.abspath(os.path.dirname(lowerCamelCase ) ) __lowercase = os.path.join(lowerCamelCase , """poker_hands.txt""" ) with open(lowerCamelCase ) as file_hand: for line in file_hand: __lowercase = line[:14].strip() __lowercase = line[15:].strip() __lowercase , __lowercase = PokerHand(lowerCamelCase ), PokerHand(lowerCamelCase ) __lowercase = player.compare_with(lowerCamelCase ) if output == "Win": answer += 1 assert answer == 376
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCamelCase : List[Any] = logging.getLogger(__name__) __UpperCamelCase : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) __UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={ 'help': ( 'The model checkpoint for weights initialization. Leave None if you want to train a model from' ' scratch.' ) } , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __UpperCamelCase : __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={ 'help': ( 'The input training data files (multiple files in glob format). ' 'Very often splitting large files to smaller files can prevent tokenizer going out of memory' ) } , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} ) __snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether ot not to use whole word mask.'} ) __snake_case :float = field( default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) __snake_case :float = field( default=1 / 6 , metadata={ 'help': ( 'Ratio of length of a span of masked tokens to surrounding context length for permutation language' ' modeling.' 
) } , ) __snake_case :int = field( default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} ) __snake_case :int = field( default=-1 , metadata={ 'help': ( 'Optional input sequence length after tokenization.' 'The training dataset will be truncated in block of this size for training.' 'Default to the model max input length for single sentence inputs (take into account special tokens).' ) } , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ): '''simple docstring''' def _dataset(lowerCamelCase , lowerCamelCase=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" ) return LineByLineWithRefDataset( tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , ) return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size ) else: return TextDataset( tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def snake_case ( ): '''simple docstring''' __lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( """Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """ """or remove the --do_eval argument.""" ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if model_args.config_name: __lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: __lowercase = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.tokenizer_name: __lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another""" """ script, save it,and load it from here, using --tokenizer_name""" ) if model_args.model_name_or_path: __lowercase = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , ) else: logger.info("""Training new model from scratch""" ) __lowercase = AutoModelWithLMHead.from_config(lowerCamelCase ) model.resize_token_embeddings(len(lowerCamelCase ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( """BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the""" """--mlm flag (masked language modeling).""" ) if data_args.block_size <= 0: __lowercase = tokenizer.max_len # Our input block size will be the max possible for the model else: __lowercase = min(data_args.block_size , tokenizer.max_len ) # Get datasets __lowercase = ( get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) __lowercase = ( get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": __lowercase = DataCollatorForPermutationLanguageModeling( tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: __lowercase = DataCollatorForWholeWordMask( tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability ) else: __lowercase = DataCollatorForLanguageModeling( tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer __lowercase = Trainer( model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , ) # Training if training_args.do_train: __lowercase = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=lowerCamelCase ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowercase = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __lowercase = trainer.evaluate() __lowercase = math.exp(eval_output["""eval_loss"""] ) __lowercase = {"""perplexity""": perplexity} __lowercase = 
os.path.join(training_args.output_dir , """eval_results_lm.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) results.update(lowerCamelCase ) return results def snake_case ( lowerCamelCase ): '''simple docstring''' main() if __name__ == "__main__": main()
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
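# A quick spot check of the implementation above against a known date: Western
# Easter in 2023 fell on Sunday, April 9.
assert gauss_easter(2023) == datetime(2023, 4, 9)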
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the given side lengths can form a simple polygon,
    i.e. the longest side is strictly shorter than the sum of the rest."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
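# Usage sketch (the function name is a reconstruction; the original was garbled):
# side lengths form a polygon only if the longest side is strictly shorter than
# the sum of all the others.
print(check_polygon([6, 10, 5]))  # True: 10 < 6 + 5
print(check_polygon([3, 7, 13, 2]))  # False: 13 >= 3 + 7 + 2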
import math


def jump_search(arr: list, x: int) -> int:
    """Search sorted ``arr`` for ``x``; return its index, or -1 if absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
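# Non-interactive usage sketch: jump search probes a sorted array in blocks of
# size ~sqrt(n) and then scans linearly inside one block, for O(sqrt(n)) work.
print(jump_search([0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 55))  # 9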
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
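# Usage sketch (function name as reconstructed): the classic "house robber"
# recurrence, tracking the best sum that includes the current element and the
# best sum that excludes it.
print(maximum_non_adjacent_sum([1, 2, 4, 5, 9, 10]))  # 17 (2 + 5 + 10)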
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __UpperCamelCase : Any = logging.get_logger(__name__) __UpperCamelCase : Tuple = { """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""", } class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :str = 'instructblip_vision_model' def __init__( self : str , _lowerCAmelCase : Optional[int]=1408 , _lowerCAmelCase : Any=6144 , _lowerCAmelCase : Dict=39 , _lowerCAmelCase : Optional[int]=16 , _lowerCAmelCase : Optional[Any]=224 , _lowerCAmelCase : List[Any]=14 , _lowerCAmelCase : Optional[Any]="gelu" , _lowerCAmelCase : List[str]=1e-6 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Tuple=1e-10 , _lowerCAmelCase : List[Any]=True , **_lowerCAmelCase : Tuple , ) -> str: """simple docstring""" super().__init__(**_lowerCAmelCase ) __lowercase = hidden_size __lowercase = intermediate_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = patch_size __lowercase = image_size __lowercase = initializer_range __lowercase = attention_dropout __lowercase = layer_norm_eps __lowercase = hidden_act __lowercase = qkv_bias @classmethod def _a ( cls : Tuple , _lowerCAmelCase : Union[str, os.PathLike] , **_lowerCAmelCase : Optional[int] ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(_lowerCAmelCase ) __lowercase , __lowercase = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": __lowercase = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase ) class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :int = 'instructblip_qformer' def __init__( self : Dict , _lowerCAmelCase : Optional[Any]=3_0522 , _lowerCAmelCase : Union[str, Any]=768 , _lowerCAmelCase : Dict=12 , _lowerCAmelCase : int=12 , _lowerCAmelCase : Optional[Any]=3072 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Any=512 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : int=1e-12 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Tuple="absolute" , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : List[Any]=1408 , **_lowerCAmelCase : str , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = hidden_act __lowercase = intermediate_size __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = initializer_range __lowercase = layer_norm_eps __lowercase = position_embedding_type __lowercase = cross_attention_frequency __lowercase = encoder_hidden_size @classmethod def _a ( cls : Optional[Any] , _lowerCAmelCase : Union[str, os.PathLike] , **_lowerCAmelCase : List[Any] ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(_lowerCAmelCase ) __lowercase , __lowercase = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": __lowercase = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase ) class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :str = 'instructblip' __snake_case :Optional[Any] = True def __init__( self : Optional[Any] , _lowerCAmelCase : str=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Dict=32 , **_lowerCAmelCase : List[str] ) -> str: """simple docstring""" super().__init__(**_lowerCAmelCase ) if vision_config is None: __lowercase = {} logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" ) if qformer_config is None: __lowercase = {} logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" ) if text_config is None: __lowercase = {} logger.info("""text_config is None. 
Initializing the text config with default values (`OPTConfig`).""" ) __lowercase = InstructBlipVisionConfig(**_lowerCAmelCase ) __lowercase = InstructBlipQFormerConfig(**_lowerCAmelCase ) __lowercase = text_config["""model_type"""] if """model_type""" in text_config else """opt""" __lowercase = CONFIG_MAPPING[text_model_type](**_lowerCAmelCase ) __lowercase = self.text_config.tie_word_embeddings __lowercase = self.text_config.is_encoder_decoder __lowercase = num_query_tokens __lowercase = self.vision_config.hidden_size __lowercase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __lowercase = 1.0 __lowercase = 0.02 @classmethod def _a ( cls : Optional[Any] , _lowerCAmelCase : InstructBlipVisionConfig , _lowerCAmelCase : InstructBlipQFormerConfig , _lowerCAmelCase : PretrainedConfig , **_lowerCAmelCase : Any , ) -> List[str]: """simple docstring""" return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_lowerCAmelCase , ) def _a ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase = copy.deepcopy(self.__dict__ ) __lowercase = self.vision_config.to_dict() __lowercase = self.qformer_config.to_dict() __lowercase = self.text_config.to_dict() __lowercase = self.__class__.model_type return output
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the checkpoint's weights into the RoBERTa-PreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
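# Example invocation (the script filename and output directory are illustrative;
# the repo id comes from the script's own --checkpoint-repo help text):
#
#   python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-converted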
import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __UpperCamelCase ( _lowerCAmelCase ): def _a ( self : Dict ) -> Any: """simple docstring""" __lowercase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_lowerCAmelCase , """hidden_sizes""" ) ) self.parent.assertTrue(hasattr(_lowerCAmelCase , """neck_hidden_sizes""" ) ) self.parent.assertTrue(hasattr(_lowerCAmelCase , """num_attention_heads""" ) ) class __UpperCamelCase : def __init__( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str]=13 , _lowerCAmelCase : int=32 , _lowerCAmelCase : str=2 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : List[str]=640 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : int="silu" , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Union[str, Any]=32 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Union[str, Any]=0.02 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : str=10 , _lowerCAmelCase : Optional[Any]=None , ) -> Tuple: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = last_hidden_size __lowercase = num_attention_heads __lowercase = hidden_act __lowercase = conv_kernel_size __lowercase = output_stride __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = classifier_dropout_prob __lowercase = use_labels __lowercase = is_training __lowercase = num_labels __lowercase = initializer_range __lowercase = scope def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.num_labels ) __lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __lowercase = self.get_config() return config, pixel_values, labels, pixel_labels def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def _a ( self : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ) -> Union[str, 
Any]: """simple docstring""" __lowercase = MobileViTModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __lowercase = model(_lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Dict ) -> List[str]: """simple docstring""" __lowercase = self.num_labels __lowercase = MobileViTForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : str ) -> Optional[Any]: """simple docstring""" __lowercase = self.num_labels __lowercase = MobileViTForSemanticSegmentation(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __lowercase = model(_lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): __snake_case :List[Any] = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) __snake_case :List[Any] = ( { 'feature-extraction': MobileViTModel, 'image-classification': MobileViTForImageClassification, 'image-segmentation': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) __snake_case :List[str] = False __snake_case :Tuple = False __snake_case :Union[str, Any] = False __snake_case :int = False def _a ( self : int ) -> Optional[Any]: """simple docstring""" __lowercase = MobileViTModelTester(self ) __lowercase = MobileViTConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViT does not use inputs_embeds""" ) def _a ( self : str ) -> Dict: """simple docstring""" pass @unittest.skip(reason="""MobileViT does not support input and output embeddings""" ) def _a ( self : List[Any] ) -> Any: """simple docstring""" pass @unittest.skip(reason="""MobileViT does not output attentions""" ) def _a ( self : Tuple ) -> Tuple: """simple docstring""" pass def _a ( self : int ) -> str: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(_lowerCAmelCase ) __lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["""pixel_values"""] 
self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _a ( self : Union[str, Any] ) -> Any: """simple docstring""" pass def _a ( self : Dict ) -> Dict: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def _a ( self : str ) -> str: """simple docstring""" def check_hidden_states_output(_lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ): __lowercase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) __lowercase = outputs.hidden_states __lowercase = 5 self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __lowercase = 2 for i in range(len(_lowerCAmelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) def _a ( self : str ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase ) @slow def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = MobileViTModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def snake_case ( ): '''simple docstring''' __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def _a ( self : Optional[Any] ) -> Any: """simple docstring""" return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None @slow def _a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(_lowerCAmelCase ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): __lowercase = model(**_lowerCAmelCase ) # verify the logits __lowercase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) __lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) ) @slow def _a ( self : List[str] ) -> str: """simple 
docstring""" __lowercase = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) __lowercase = model.to(_lowerCAmelCase ) __lowercase = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) __lowercase = prepare_img() __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): __lowercase = model(**_lowerCAmelCase ) __lowercase = outputs.logits # verify the logits __lowercase = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , _lowerCAmelCase ) __lowercase = torch.tensor( [ [[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]], [[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]], [[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]], ] , device=_lowerCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1e-4 ) ) @slow def _a ( self : Dict ) -> Optional[int]: """simple docstring""" __lowercase = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) __lowercase = model.to(_lowerCAmelCase ) __lowercase = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) __lowercase = prepare_img() __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): __lowercase = model(**_lowerCAmelCase ) __lowercase = outputs.logits.detach().cpu() __lowercase = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase , target_sizes=[(50, 60)] ) __lowercase = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , _lowerCAmelCase ) __lowercase = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase ) __lowercase = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
53
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    if (ksize % 2) == 0:
        __lowercase = ksize + 1
    __lowercase = np.zeros((ksize, ksize) , dtype=np.float64 )

    # each value
    for y in range(lowerCamelCase ):
        for x in range(lowerCamelCase ):
            # distance from center
            __lowercase = x - ksize // 2
            __lowercase = y - ksize // 2

            # degree to radiant
            __lowercase = theta / 180 * np.pi
            __lowercase = np.cos(_theta )
            __lowercase = np.sin(_theta )

            # get kernel x
            __lowercase = cos_theta * px + sin_theta * py

            # get kernel y
            __lowercase = -sin_theta * px + cos_theta * py

            # fill kernel
            __lowercase = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    __UpperCamelCase : List[Any] = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    __UpperCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    __UpperCamelCase : Union[str, Any] = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        __UpperCamelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
    __UpperCamelCase : List[str] = out / out.max() * 255
    __UpperCamelCase : List[str] = out.astype(np.uint8)

    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
    waitKey(0)
53
1
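The Gabor kernel file in the row above is hard to follow through its obfuscated variable names, so here is a minimal, readable sketch of the same construction. The parameter names (ksize, sigma, theta, lambd, gamma, psi) are assumptions chosen to mirror OpenCV's getGaborKernel arguments; only the formula itself is taken from the file above.

import numpy as np

def gabor_kernel(ksize: int, sigma: float, theta: float,
                 lambd: float, gamma: float, psi: float) -> np.ndarray:
    """Real-valued Gabor kernel; theta is given in degrees (sketch, names assumed)."""
    if ksize % 2 == 0:  # force an odd size so the kernel has a center pixel
        ksize += 1
    kernel = np.zeros((ksize, ksize), dtype=np.float64)
    theta_rad = np.deg2rad(theta)
    cos_t, sin_t = np.cos(theta_rad), np.sin(theta_rad)
    for y in range(ksize):
        for x in range(ksize):
            px, py = x - ksize // 2, y - ksize // 2  # offset from the center pixel
            # rotate the sampling grid by theta
            xr = cos_t * px + sin_t * py
            yr = -sin_t * px + cos_t * py
            # Gaussian envelope modulated by a cosine carrier
            kernel[y, x] = np.exp(-(xr**2 + gamma**2 * yr**2) / (2 * sigma**2)) * np.cos(
                2 * np.pi * xr / lambd + psi)
    return kernel

if __name__ == "__main__":
    k = gabor_kernel(10, sigma=8, theta=45, lambd=10, gamma=0, psi=0)
    print(k.shape)  # (11, 11) after the even-size adjustment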
import unittest

from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


__UpperCamelCase : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")


@require_sentencepiece
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
    __snake_case :Dict = XLMProphetNetTokenizer
    __snake_case :Optional[Any] = False
    __snake_case :str = True

    def _a ( self : Dict ) -> List[str]:
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        __lowercase = XLMProphetNetTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    def _a ( self : str ) -> List[str]:
        """simple docstring"""
        __lowercase = """[PAD]"""
        __lowercase = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )

    def _a ( self : int ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , """[PAD]""" )
        self.assertEqual(vocab_keys[1] , """[CLS]""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(_lowerCAmelCase ) , 1012 )

    def _a ( self : Any ) -> List[str]:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1012 )

    def _a ( self : Tuple ) -> List[str]:
        """simple docstring"""
        __lowercase = XLMProphetNetTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )

        __lowercase = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(_lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        __lowercase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            _lowerCAmelCase ,
            [
                SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""",
                """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""",
                """9""", """2""", """0""", """0""", """0""", """,""",
                SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""",
            ] , )
        __lowercase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
        self.assertListEqual(
            _lowerCAmelCase ,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ] , )

        __lowercase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
        self.assertListEqual(
            _lowerCAmelCase ,
            [
                SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""",
                """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""",
                """[UNK]""", """2""", """0""", """0""", """0""", """,""",
                SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""", """al""", """s""", """[UNK]""", """.""",
            ] , )

    @cached_property
    def _a ( self : Optional[int] ) -> Any:
        """simple docstring"""
        return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )

    @slow
    def _a ( self : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = """Hello World!"""
        __lowercase = [3_5389, 6672, 49, 2]
        self.assertListEqual(_lowerCAmelCase , self.big_tokenizer.encode(_lowerCAmelCase ) )

    @slow
    def _a ( self : Dict ) -> str:
        """simple docstring"""
        # fmt: off
        __lowercase = {"""input_ids""": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCAmelCase ,
            model_name="""microsoft/xprophetnet-large-wiki100-cased""" ,
            revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
53
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)


def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = set()
    __lowercase = []

    def parse_line(lowerCamelCase ):
        for line in fp:
            if isinstance(lowerCamelCase , lowerCamelCase ):
                __lowercase = line.decode("""UTF-8""" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(""" """ ):
                # process a single warning and move it to `selected_warnings`.
                if len(lowerCamelCase ) > 0:
                    __lowercase = """\n""".join(lowerCamelCase )
                    # Only keep the warnings specified in `targets`
                    if any(F': {x}: ' in warning for x in targets ):
                        selected_warnings.add(lowerCamelCase )
                    buffer.clear()
                continue
            else:
                __lowercase = line.strip()
                buffer.append(lowerCamelCase )

    if from_gh:
        for filename in os.listdir(lowerCamelCase ):
            __lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
            if not os.path.isdir(lowerCamelCase ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(lowerCamelCase ) as fp:
                    parse_line(lowerCamelCase )
    else:
        try:
            with zipfile.ZipFile(lowerCamelCase ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(lowerCamelCase ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(lowerCamelCase ) as fp:
                            parse_line(lowerCamelCase )
        except Exception:
            logger.warning(
                F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )

    return selected_warnings


def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = set()

    __lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) )

    return selected_warnings


if __name__ == "__main__":

    def snake_case ( lowerCamelCase ):
        '''simple docstring'''
        return values.split(""",""" )

    __UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    parser.add_argument(
        """--output_dir""",
        type=str,
        required=True,
        help="""Where to store the downloaded artifacts and other result files.""",
    )
    parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
    # optional parameters
    parser.add_argument(
        """--targets""",
        default="""DeprecationWarning,UserWarning,FutureWarning""",
        type=list_str,
        help="""Comma-separated list of target warning(s) which we want to extract.""",
    )
    parser.add_argument(
        """--from_gh""",
        action="""store_true""",
        help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
    )

    __UpperCamelCase : List[str] = parser.parse_args()

    __UpperCamelCase : Union[str, Any] = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        __UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("""=""" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    __UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets)

    __UpperCamelCase : Any = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
53
1
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    __snake_case :Any = StableUnCLIPImgaImgPipeline
    __snake_case :List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    __snake_case :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __snake_case :List[Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __snake_case :Dict = frozenset([] )

    def _a ( self : Optional[Any] ) -> str:
        """simple docstring"""
        __lowercase = 32
        __lowercase = embedder_hidden_size

        # image encoding components
        __lowercase = CLIPImageProcessor(crop_size=32 , size=32 )

        torch.manual_seed(0 )
        __lowercase = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=_lowerCAmelCase ,
                projection_dim=_lowerCAmelCase ,
                num_hidden_layers=5 ,
                num_attention_heads=4 ,
                image_size=32 ,
                intermediate_size=37 ,
                patch_size=1 , ) )

        # regular denoising components
        torch.manual_seed(0 )
        __lowercase = StableUnCLIPImageNormalizer(embedding_dim=_lowerCAmelCase )
        __lowercase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )

        torch.manual_seed(0 )
        __lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )

        torch.manual_seed(0 )
        __lowercase = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 ,
                eos_token_id=2 ,
                hidden_size=_lowerCAmelCase ,
                projection_dim=32 ,
                intermediate_size=37 ,
                layer_norm_eps=1e-05 ,
                num_attention_heads=4 ,
                num_hidden_layers=5 ,
                pad_token_id=1 ,
                vocab_size=1000 , ) )

        torch.manual_seed(0 )
        __lowercase = UNetaDConditionModel(
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") ,
            up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") ,
            block_out_channels=(32, 64) ,
            attention_head_dim=(2, 4) ,
            class_embed_type="""projection""" ,
            projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,
            cross_attention_dim=_lowerCAmelCase ,
            layers_per_block=1 ,
            upcast_attention=_lowerCAmelCase ,
            use_linear_projection=_lowerCAmelCase , )

        torch.manual_seed(0 )
        __lowercase = DDIMScheduler(
            beta_schedule="""scaled_linear""" ,
            beta_start=0.00_085 ,
            beta_end=0.012 ,
            prediction_type="""v_prediction""" ,
            set_alpha_to_one=_lowerCAmelCase ,
            steps_offset=1 , )

        torch.manual_seed(0 )
        __lowercase = AutoencoderKL()

        __lowercase = {
            # image encoding components
            """feature_extractor""": feature_extractor,
            """image_encoder""": image_encoder.eval(),
            # image noising components
            """image_normalizer""": image_normalizer.eval(),
            """image_noising_scheduler""": image_noising_scheduler,
            # regular denoising components
            """tokenizer""": tokenizer,
            """text_encoder""": text_encoder.eval(),
            """unet""": unet.eval(),
            """scheduler""": scheduler,
            """vae""": vae.eval(),
        }

        return components

    def _a ( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]=0 , _lowerCAmelCase : Optional[int]=True ) -> Tuple:
        """simple docstring"""
        if str(_lowerCAmelCase ).startswith("""mps""" ):
            __lowercase = torch.manual_seed(_lowerCAmelCase )
        else:
            __lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )

        __lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )

        if pil_image:
            __lowercase = input_image * 0.5 + 0.5
            __lowercase = input_image.clamp(0 , 1 )
            __lowercase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            __lowercase = DiffusionPipeline.numpy_to_pil(_lowerCAmelCase )[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def _a ( self : str ) -> Any:
        """simple docstring"""
        __lowercase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __lowercase = self.get_dummy_components()
        __lowercase = StableUnCLIPImgaImgPipeline(**_lowerCAmelCase )
        __lowercase = sd_pipe.to(_lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )

        __lowercase = self.get_dummy_inputs(_lowerCAmelCase )
        inputs.update({"""image_embeds""": None} )
        __lowercase = sd_pipe(**_lowerCAmelCase ).images
        __lowercase = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        __lowercase = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _a ( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = torch_device in ["""cpu""", """mps"""]
        self._test_attention_slicing_forward_pass(test_max_difference=_lowerCAmelCase )

    def _a ( self : Tuple ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = torch_device in ["""cpu""", """mps"""]
        self._test_inference_batch_single_identical(test_max_difference=_lowerCAmelCase )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() ,
        reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def _a ( self : int ) -> Tuple:
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCAmelCase )


@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def _a ( self : int ) -> List[Any]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _a ( self : List[str] ) -> int:
        """simple docstring"""
        __lowercase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        __lowercase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )

        __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
        pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        __lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __lowercase = pipe(_lowerCAmelCase , """anime turle""" , generator=_lowerCAmelCase , output_type="""np""" )

        __lowercase = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )

    def _a ( self : str ) -> Tuple:
        """simple docstring"""
        __lowercase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        __lowercase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )

        __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
        pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        __lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __lowercase = pipe(_lowerCAmelCase , """anime turle""" , generator=_lowerCAmelCase , output_type="""np""" )

        __lowercase = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )

    def _a ( self : Dict ) -> List[str]:
        """simple docstring"""
        __lowercase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        __lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
        __lowercase = pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        __lowercase = pipe(
            _lowerCAmelCase ,
            """anime turtle""" ,
            num_inference_steps=2 ,
            output_type="""np""" , )

        __lowercase = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
53
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


__UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase : Dict = [
        """MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MraForMaskedLM""",
        """MraForMultipleChoice""",
        """MraForQuestionAnswering""",
        """MraForSequenceClassification""",
        """MraForTokenClassification""",
        """MraLayer""",
        """MraModel""",
        """MraPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    __UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
53
1
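The MRA __init__.py above (and the SwiftFormer one in a later row) registers its public names through transformers' `_LazyModule` so that heavy backends like torch are only imported when actually used. As a rough, hedged illustration of the underlying idea — not transformers' actual implementation — the same effect can be had with PEP 562's module-level `__getattr__`; the `_import_structure` entries below are placeholders:

import importlib

# Map public names to the submodule that defines them. These entries are
# illustrative placeholders, not the real MRA import structure.
_import_structure = {
    "configuration_mra": ["MraConfig"],
    "modeling_mra": ["MraModel"],
}
_name_to_module = {
    name: module for module, names in _import_structure.items() for name in names
}

def __getattr__(name: str):
    # Invoked only when `name` is not already defined at module level (PEP 562).
    # The relative import requires this file to live inside a package.
    if name in _name_to_module:
        submodule = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")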
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = RemBertConfig.from_json_file(lowerCamelCase )
    print("""Building PyTorch model from configuration: {}""".format(str(lowerCamelCase ) ) )
    __lowercase = RemBertModel(lowerCamelCase )

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(lowerCamelCase , lowerCamelCase , lowerCamelCase )

    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(lowerCamelCase ) )
    torch.save(model.state_dict() , lowerCamelCase )


if __name__ == "__main__":
    __UpperCamelCase : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--rembert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained RemBERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    __UpperCamelCase : int = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
53
def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    if not isinstance(lowerCamelCase , lowerCamelCase ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    __lowercase = str(lowerCamelCase )
    __lowercase = """""".join(sorted(lowerCamelCase ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def snake_case ( lowerCamelCase = 99 ):
    '''simple docstring'''
    if not 0 < percent < 100:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    __lowercase = 0
    __lowercase = 1

    while True:
        if check_bouncy(lowerCamelCase ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(F'''{solution(99)}''')
53
1
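As a quick check of the bouncy-number definition used in the row above (digits neither entirely non-decreasing nor entirely non-increasing), a self-contained sketch using the classic Project Euler 112 examples:

def is_bouncy(n: int) -> bool:
    # Same sorted-string trick as the file above, written out readably.
    digits = list(str(n))
    return sorted(digits) != digits and sorted(digits, reverse=True) != digits

assert not is_bouncy(134468)  # digits only ever increase -> not bouncy
assert not is_bouncy(66420)   # digits only ever decrease -> not bouncy
assert is_bouncy(155349)      # direction changes (1<5, then 5>3, then 3<4) -> bouncy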
def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    if digit_amount > 0:
        return round(number - int(lowerCamelCase ) , lowerCamelCase )
    return number - int(lowerCamelCase )


if __name__ == "__main__":
    print(decimal_isolate(1.5_3, 0))
    print(decimal_isolate(3_5.3_4_5, 1))
    print(decimal_isolate(3_5.3_4_5, 2))
    print(decimal_isolate(3_5.3_4_5, 3))
    print(decimal_isolate(-1_4.7_8_9, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-1_4.1_2_3, 1))
    print(decimal_isolate(-1_4.1_2_3, 2))
    print(decimal_isolate(-1_4.1_2_3, 3))
53
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


__UpperCamelCase : Tuple = {
    """configuration_swiftformer""": [
        """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SwiftFormerConfig""",
        """SwiftFormerOnnxConfig""",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase : Optional[Any] = [
        """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwiftFormerForImageClassification""",
        """SwiftFormerModel""",
        """SwiftFormerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
53
1
import argparse
from typing import List

import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset

# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


__UpperCamelCase : int = 16
__UpperCamelCase : int = 32


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 16 ):
    '''simple docstring'''
    __lowercase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    __lowercase = DatasetDict(
        {
            """train""": dataset["""train"""].select(lowerCamelCase ),
            """validation""": dataset["""train"""].select(lowerCamelCase ),
            """test""": dataset["""validation"""],
        } )

    def tokenize_function(lowerCamelCase ):
        # max_length=None => use the model max length (it's actually the default)
        __lowercase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        __lowercase = datasets.map(
            lowerCamelCase ,
            batched=lowerCamelCase ,
            remove_columns=["""idx""", """sentence1""", """sentence2"""] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    __lowercase = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(lowerCamelCase ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        __lowercase = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            __lowercase = 16
        elif accelerator.mixed_precision != "no":
            __lowercase = 8
        else:
            __lowercase = None

        return tokenizer.pad(
            lowerCamelCase ,
            padding="""longest""" ,
            max_length=lowerCamelCase ,
            pad_to_multiple_of=lowerCamelCase ,
            return_tensors="""pt""" , )

    # Instantiate dataloaders.
    __lowercase = DataLoader(
        tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase )
    __lowercase = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase )
    __lowercase = DataLoader(
        tokenized_datasets["""test"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase )

    return train_dataloader, eval_dataloader, test_dataloader


def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = []
    # Download the dataset
    __lowercase = load_dataset("""glue""" , """mrpc""" )
    # Create our splits
    __lowercase = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    __lowercase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    __lowercase = config["""lr"""]
    __lowercase = int(config["""num_epochs"""] )
    __lowercase = int(config["""seed"""] )
    __lowercase = int(config["""batch_size"""] )
    __lowercase = evaluate.load("""glue""" , """mrpc""" )

    # If the batch size is too big we use gradient accumulation
    __lowercase = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        __lowercase = batch_size // MAX_GPU_BATCH_SIZE
        __lowercase = MAX_GPU_BATCH_SIZE

    set_seed(lowerCamelCase )

    # New Code #
    # Create our folds:
    __lowercase = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
    __lowercase = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(lowerCamelCase ):
        __lowercase , __lowercase , __lowercase = get_fold_dataloaders(
            lowerCamelCase ,
            lowerCamelCase ,
            lowerCamelCase ,
            lowerCamelCase , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        __lowercase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase )

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        __lowercase = model.to(accelerator.device )

        # Instantiate optimizer
        __lowercase = AdamW(params=model.parameters() , lr=lowerCamelCase )

        # Instantiate scheduler
        __lowercase = get_linear_schedule_with_warmup(
            optimizer=lowerCamelCase ,
            num_warmup_steps=100 ,
            num_training_steps=(len(lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )

        # Now we train the model
        for epoch in range(lowerCamelCase ):
            model.train()
            for step, batch in enumerate(lowerCamelCase ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                __lowercase = model(**lowerCamelCase )
                __lowercase = outputs.loss
                __lowercase = loss / gradient_accumulation_steps
                accelerator.backward(lowerCamelCase )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(lowerCamelCase ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    __lowercase = model(**lowerCamelCase )
                __lowercase = outputs.logits.argmax(dim=-1 )
                __lowercase , __lowercase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
                metric.add_batch(
                    predictions=lowerCamelCase ,
                    references=lowerCamelCase , )

            __lowercase = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F'epoch {epoch}:' , lowerCamelCase )

        # New Code #
        # We also run predictions on the test set at the very end
        __lowercase = []
        for step, batch in enumerate(lowerCamelCase ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                __lowercase = model(**lowerCamelCase )
            __lowercase = outputs.logits
            __lowercase , __lowercase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(lowerCamelCase , dim=0 ) )
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    __lowercase = torch.cat(lowerCamelCase , dim=0 )
    __lowercase = torch.stack(lowerCamelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    __lowercase = metric.compute(predictions=lowerCamelCase , references=lowerCamelCase )
    accelerator.print("""Average test metrics from all folds:""" , lowerCamelCase )


def snake_case ( ):
    '''simple docstring'''
    __lowercase = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" ,
        type=lowerCamelCase ,
        default=lowerCamelCase ,
        choices=["""no""", """fp16""", """bf16""", """fp8"""] ,
        help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    # New Code #
    parser.add_argument("""--num_folds""" , type=lowerCamelCase , default=3 , help="""The number of splits to perform across the dataset""" )
    __lowercase = parser.parse_args()
    __lowercase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(lowerCamelCase , lowerCamelCase )


if __name__ == "__main__":
    main()
53
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
    def _a ( self : Union[str, Any] ) -> Any:
        """simple docstring"""
        super().tearDown()
        gc.collect()

    def _a ( self : List[str] ) -> int:
        """simple docstring"""
        __lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2""" ,
            revision="""bf16""" ,
            dtype=jnp.bfloataa , )
        __lowercase = """A painting of a squirrel eating a burger"""

        __lowercase = jax.device_count()
        __lowercase = num_samples * [prompt]
        __lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )

        __lowercase = replicate(_lowerCAmelCase )
        __lowercase = shard(_lowerCAmelCase )

        __lowercase = jax.random.PRNGKey(0 )
        __lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )

        __lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        __lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        __lowercase = images[0, 253:256, 253:256, -1]

        __lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
        print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2

    def _a ( self : str ) -> List[Any]:
        """simple docstring"""
        __lowercase = """stabilityai/stable-diffusion-2"""
        __lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
        __lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            _lowerCAmelCase ,
            scheduler=_lowerCAmelCase ,
            revision="""bf16""" ,
            dtype=jnp.bfloataa , )
        __lowercase = scheduler_params
        __lowercase = """A painting of a squirrel eating a burger"""

        __lowercase = jax.device_count()
        __lowercase = num_samples * [prompt]
        __lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )

        __lowercase = replicate(_lowerCAmelCase )
        __lowercase = shard(_lowerCAmelCase )

        __lowercase = jax.random.PRNGKey(0 )
        __lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )

        __lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        __lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        __lowercase = images[0, 253:256, 253:256, -1]

        __lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
        print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
53
1
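The cross-validation script in the row above leans on `StratifiedKFold` to produce the per-fold index arrays it feeds to `dataset.select(...)`. A minimal sketch of just that splitting step, with a toy label array standing in for the real MRPC labels (the fold count of 3 mirrors the script's `--num_folds` default):

import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 1, 1, 1, 0, 1, 0, 1])  # toy class labels
kfold = StratifiedKFold(n_splits=3)

# Each split preserves the class ratio between the train and validation indices,
# matching how the script calls kfold.split(np.zeros(num_rows), train_labels).
for fold, (train_idx, valid_idx) in enumerate(kfold.split(np.zeros(len(labels)), labels)):
    print(f"fold {fold}: train={train_idx.tolist()} valid={valid_idx.tolist()}")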
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 / sqrt(2 ) ):
    '''simple docstring'''
    __lowercase = tau * frequency / samplerate
    __lowercase = sin(lowerCamelCase )
    __lowercase = cos(lowerCamelCase )
    __lowercase = _sin / (2 * q_factor)

    __lowercase = (1 - _cos) / 2
    __lowercase = 1 - _cos

    __lowercase = 1 + alpha
    __lowercase = -2 * _cos
    __lowercase = 1 - alpha

    __lowercase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 / sqrt(2 ) ):
    '''simple docstring'''
    __lowercase = tau * frequency / samplerate
    __lowercase = sin(lowerCamelCase )
    __lowercase = cos(lowerCamelCase )
    __lowercase = _sin / (2 * q_factor)

    __lowercase = (1 + _cos) / 2
    __lowercase = -1 - _cos

    __lowercase = 1 + alpha
    __lowercase = -2 * _cos
    __lowercase = 1 - alpha

    __lowercase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 / sqrt(2 ) ):
    '''simple docstring'''
    __lowercase = tau * frequency / samplerate
    __lowercase = sin(lowerCamelCase )
    __lowercase = cos(lowerCamelCase )
    __lowercase = _sin / (2 * q_factor)

    __lowercase = _sin / 2
    __lowercase = 0
    __lowercase = -ba

    __lowercase = 1 + alpha
    __lowercase = -2 * _cos
    __lowercase = 1 - alpha

    __lowercase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 / sqrt(2 ) ):
    '''simple docstring'''
    __lowercase = tau * frequency / samplerate
    __lowercase = sin(lowerCamelCase )
    __lowercase = cos(lowerCamelCase )
    __lowercase = _sin / (2 * q_factor)

    __lowercase = 1 - alpha
    __lowercase = -2 * _cos
    __lowercase = 1 + alpha

    __lowercase = IIRFilter(2 )
    filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
    return filt


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 / sqrt(2 ) , ):
    '''simple docstring'''
    __lowercase = tau * frequency / samplerate
    __lowercase = sin(lowerCamelCase )
    __lowercase = cos(lowerCamelCase )
    __lowercase = _sin / (2 * q_factor)
    __lowercase = 10 ** (gain_db / 40)

    __lowercase = 1 + alpha * big_a
    __lowercase = -2 * _cos
    __lowercase = 1 - alpha * big_a
    __lowercase = 1 + alpha / big_a
    __lowercase = -2 * _cos
    __lowercase = 1 - alpha / big_a

    __lowercase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 / sqrt(2 ) , ):
    '''simple docstring'''
    __lowercase = tau * frequency / samplerate
    __lowercase = sin(lowerCamelCase )
    __lowercase = cos(lowerCamelCase )
    __lowercase = _sin / (2 * q_factor)
    __lowercase = 10 ** (gain_db / 40)

    __lowercase = (big_a + 1) - (big_a - 1) * _cos
    __lowercase = (big_a + 1) + (big_a - 1) * _cos
    __lowercase = (big_a - 1) - (big_a + 1) * _cos
    __lowercase = (big_a - 1) + (big_a + 1) * _cos
    __lowercase = 2 * sqrt(lowerCamelCase ) * alpha

    __lowercase = big_a * (pmc + aaa)
    __lowercase = 2 * big_a * mpc
    __lowercase = big_a * (pmc - aaa)
    __lowercase = ppmc + aaa
    __lowercase = -2 * pmpc
    __lowercase = ppmc - aaa

    __lowercase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 / sqrt(2 ) , ):
    '''simple docstring'''
    __lowercase = tau * frequency / samplerate
    __lowercase = sin(lowerCamelCase )
    __lowercase = cos(lowerCamelCase )
    __lowercase = _sin / (2 * q_factor)
    __lowercase = 10 ** (gain_db / 40)

    __lowercase = (big_a + 1) - (big_a - 1) * _cos
    __lowercase = (big_a + 1) + (big_a - 1) * _cos
    __lowercase = (big_a - 1) - (big_a + 1) * _cos
    __lowercase = (big_a - 1) + (big_a + 1) * _cos
    __lowercase = 2 * sqrt(lowerCamelCase ) * alpha

    __lowercase = big_a * (ppmc + aaa)
    __lowercase = -2 * big_a * pmpc
    __lowercase = big_a * (ppmc - aaa)
    __lowercase = pmc + aaa
    __lowercase = 2 * mpc
    __lowercase = pmc - aaa

    __lowercase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt
53
import heapq
import sys

import numpy as np


__UpperCamelCase : List[str] = tuple[int, int]


class __UpperCamelCase :
    def __init__( self : Optional[int] ) -> Dict:
        """simple docstring"""
        __lowercase = []
        __lowercase = set()

    def _a ( self : int ) -> List[Any]:
        """simple docstring"""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("""inf""" )

    def _a ( self : Dict ) -> Optional[Any]:
        """simple docstring"""
        return len(self.elements ) == 0

    def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
        """simple docstring"""
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(_lowerCAmelCase )
        else:
            # update
            # print("update", item)
            __lowercase = []
            ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )

    def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict:
        """simple docstring"""
        if item in self.set:
            self.set.remove(_lowerCAmelCase )
            __lowercase = []
            ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )

    def _a ( self : Any ) -> List[Any]:
        """simple docstring"""
        return self.elements[0][1]

    def _a ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
        self.set.remove(_lowerCAmelCase )
        return (priority, item)


def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = np.array(lowerCamelCase )
    __lowercase = np.array(lowerCamelCase )
    return np.linalg.norm(a - b )


def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t


def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
    return ans


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = np.chararray((n, n) )
    for i in range(lowerCamelCase ):
        for j in range(lowerCamelCase ):
            __lowercase = """*"""

    for i in range(lowerCamelCase ):
        for j in range(lowerCamelCase ):
            if (j, (n - 1) - i) in blocks:
                __lowercase = """#"""

    __lowercase = """-"""
    __lowercase = back_pointer[goal]
    while x != start:
        ((__lowercase) , (__lowercase)) = x
        # print(x)
        __lowercase = """-"""
        __lowercase = back_pointer[x]
    __lowercase = """-"""

    for i in range(lowerCamelCase ):
        for j in range(lowerCamelCase ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=""" """ )
                print("""<-- End position""" , end=""" """ )
            else:
                print(grid[i][j] , end=""" """ )
        print()
    print("""^""" )
    print("""Start position""" )
    print()
    print("""# is an obstacle""" )
    print("""- is the path taken by algorithm""" )
    print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
    __lowercase = back_pointer[goal]
    while x != start:
        print(lowerCamelCase , end=""" """ )
        __lowercase = back_pointer[x]
    print(lowerCamelCase )
    sys.exit()


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
    '''simple docstring'''
    for itera in range(lowerCamelCase ):
        open_list[itera].remove_element(lowerCamelCase )
    # print("s", s)
    # print("j", j)
    ((__lowercase) , (__lowercase)) = s
    __lowercase = (x - 1, y)
    __lowercase = (x + 1, y)
    __lowercase = (x, y + 1)
    __lowercase = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(lowerCamelCase ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(lowerCamelCase )
                __lowercase = -1
                __lowercase = float("""inf""" )

                if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
                    __lowercase = g_function[s] + 1
                    __lowercase = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
                    if neighbours not in close_list_inad:
                        for var in range(1 , lowerCamelCase ):
                            if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
                                lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
                                open_list[j].put(
                                    lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )


def snake_case ( ):
    '''simple docstring'''
    __lowercase = []
    for x in range(1 , 5 ):
        for y in range(1 , 6 ):
            some_list.append((x, y) )

    for x in range(15 , 20 ):
        some_list.append((x, 17) )

    for x in range(10 , 19 ):
        for y in range(1 , 15 ):
            some_list.append((x, y) )

    # L block
    for x in range(1 , 4 ):
        for y in range(12 , 19 ):
            some_list.append((x, y) )
    for x in range(3 , 13 ):
        for y in range(16 , 19 ):
            some_list.append((x, y) )
    return some_list


__UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}

__UpperCamelCase : Optional[Any] = [
    (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1),
    (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1),
]
__UpperCamelCase : Optional[Any] = make_common_ground()

__UpperCamelCase : Dict = blocks_blk
# hyper parameters
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Optional[int] = 20
__UpperCamelCase : List[str] = 3  # one consistent and two other inconsistent

# start and end destination
__UpperCamelCase : str = (0, 0)
__UpperCamelCase : str = (n - 1, n - 1)

__UpperCamelCase : Optional[Any] = 1


def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = {start: 0, goal: float("""inf""" )}
    __lowercase = {start: -1, goal: -1}
    __lowercase = []
    __lowercase = set()

    for i in range(lowerCamelCase ):
        open_list.append(PriorityQueue() )
        open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )

    __lowercase = []
    __lowercase = []
    while open_list[0].minkey() < float("""inf""" ):
        for i in range(1 , lowerCamelCase ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("""inf""" ):
                        do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
                else:
                    __lowercase , __lowercase = open_list[i].top_show()
                    visited.add(lowerCamelCase )
                    expand_state(
                        lowerCamelCase ,
                        lowerCamelCase ,
                        lowerCamelCase ,
                        lowerCamelCase ,
                        lowerCamelCase ,
                        lowerCamelCase ,
                        lowerCamelCase ,
                        lowerCamelCase , )
                    close_list_inad.append(lowerCamelCase )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("""inf""" ):
                        do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
                else:
                    __lowercase = open_list[0].top_show()
                    visited.add(lowerCamelCase )
                    expand_state(
                        lowerCamelCase ,
                        0 ,
                        lowerCamelCase ,
                        lowerCamelCase ,
                        lowerCamelCase ,
                        lowerCamelCase ,
                        lowerCamelCase ,
                        lowerCamelCase , )
                    close_list_anchor.append(lowerCamelCase )
    print("""No path found to goal""" )
    print()
    for i in range(n - 1 , -1 , -1 ):
        for j in range(lowerCamelCase ):
            if (j, i) in blocks:
                print("""#""" , end=""" """ )
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("""*""" , end=""" """ )
                else:
                    print("""-""" , end=""" """ )
            else:
                print("""*""" , end=""" """ )
            if (j, i) == (n - 1, n - 1):
                print("""<-- End position""" , end=""" """ )
        print()
    print("""^""" )
    print("""Start position""" )
    print()
    print("""# is an obstacle""" )
    print("""- is the path taken by algorithm""" )


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
53
1
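The filter constructors in this row's first file all hand Audio-EQ-Cookbook biquad coefficients to an external `IIRFilter`. As a standalone sketch (the helper names and the 5 kHz test tone are mine, not from the file), the same low-pass can be applied directly through the second-order difference equation:

from math import cos, sin, sqrt, tau

def lowpass_coefficients(frequency: float, samplerate: int, q: float = 1 / sqrt(2)):
    """Audio-EQ-Cookbook low-pass biquad: returns (a0, a1, a2), (b0, b1, b2)."""
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q)
    b1 = 1 - cos(w0)
    b0 = b2 = b1 / 2
    a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha
    return (a0, a1, a2), (b0, b1, b2)

def biquad(samples, a, b):
    """Direct-form I: a0*y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]."""
    (a0, a1, a2), (b0, b1, b2) = a, b
    x1 = x2 = y1 = y2 = 0.0
    out = []
    for x0 in samples:
        y0 = (b0 * x0 + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2) / a0
        out.append(y0)
        x1, x2, y1, y2 = x0, x1, y0, y1
    return out

# Filter one second of a 5 kHz sine sampled at 48 kHz with a 1 kHz low-pass:
# the tone sits well above the cutoff, so its amplitude should drop noticeably.
sr = 48_000
tone = [sin(tau * 5_000 * n / sr) for n in range(sr)]
a, b = lowpass_coefficients(1_000, sr)
filtered = biquad(tone, a, b)
print(max(abs(s) for s in filtered[sr // 2 :]))  # well below 1.0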
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    """files""" ,
    [
        ["""full:README.md""", """dataset_infos.json"""],
        ["""empty:README.md""", """dataset_infos.json"""],
        ["""dataset_infos.json"""],
        ["""full:README.md"""],
    ] , )
def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = tmp_path_factory.mktemp("""dset_infos_dir""" )
    if "full:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""---\ndataset_info:\n  dataset_size: 42\n---""" )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""""" )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
            f.write("""{\"default\": {\"dataset_size\": 42}}""" )
    __lowercase = DatasetInfosDict.from_directory(lowerCamelCase )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    """dataset_info""" ,
    [
        DatasetInfo(),
        DatasetInfo(
            description="""foo""" ,
            features=Features({"""a""": Value("""int32""" )} ) ,
            builder_name="""builder""" ,
            config_name="""config""" ,
            version="""1.0.0""" ,
            splits=[{"""name""": """train"""}] ,
            download_size=42 , ),
    ] , )
def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = str(lowerCamelCase )
    dataset_info.write_to_directory(lowerCamelCase )
    __lowercase = DatasetInfo.from_directory(lowerCamelCase )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(lowerCamelCase , """dataset_info.json""" ) )


def snake_case ( ):
    '''simple docstring'''
    __lowercase = DatasetInfo(
        description="""foo""" ,
        citation="""bar""" ,
        homepage="""https://foo.bar""" ,
        license="""CC0""" ,
        features=Features({"""a""": Value("""int32""" )} ) ,
        post_processed={} ,
        supervised_keys=() ,
        task_templates=[] ,
        builder_name="""builder""" ,
        config_name="""config""" ,
        version="""1.0.0""" ,
        splits=[{"""name""": """train""", """num_examples""": 42}] ,
        download_checksums={} ,
        download_size=1_337 ,
        post_processing_size=442 ,
        dataset_size=1_234 ,
        size_in_bytes=1_337 + 442 + 1_234 , )
    __lowercase = dataset_info._to_yaml_dict()
    assert sorted(lowerCamelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    __lowercase = yaml.safe_dump(lowerCamelCase )
    __lowercase = yaml.safe_load(lowerCamelCase )
    assert dataset_info_yaml_dict == reloaded


def snake_case ( ):
    '''simple docstring'''
    __lowercase = DatasetInfo()
    __lowercase = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    """dataset_infos_dict""" ,
    [
        DatasetInfosDict(),
        DatasetInfosDict({"""default""": DatasetInfo()} ),
        DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
        DatasetInfosDict(
            {
                """default""": DatasetInfo(
                    description="""foo""" ,
                    features=Features({"""a""": Value("""int32""" )} ) ,
                    builder_name="""builder""" ,
                    config_name="""config""" ,
                    version="""1.0.0""" ,
                    splits=[{"""name""": """train"""}] ,
                    download_size=42 , )
            } ),
        DatasetInfosDict(
            {
                """v1""": DatasetInfo(dataset_size=42 ),
                """v2""": DatasetInfo(dataset_size=1_337 ),
            } ),
    ] , )
def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    __lowercase = str(lowerCamelCase )
    dataset_infos_dict.write_to_directory(lowerCamelCase )
    __lowercase = DatasetInfosDict.from_directory(lowerCamelCase )

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        __lowercase = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        __lowercase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(lowerCamelCase , """README.md""" ) )
53
import argparse
import json
import pickle
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging


logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    __lowercase = SwinConfig.from_pretrained(
        """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    __lowercase = MaskFormerConfig(backbone_config=lowerCamelCase )

    __lowercase = """huggingface/label-files"""
    if "ade20k-full" in model_name:
        # this should be ok
        __lowercase = 847
        __lowercase = """maskformer-ade20k-full-id2label.json"""
    elif "ade" in model_name:
        # this should be ok
        __lowercase = 150
        __lowercase = """ade20k-id2label.json"""
    elif "coco-stuff" in model_name:
        # this should be ok
        __lowercase = 171
        __lowercase = """maskformer-coco-stuff-id2label.json"""
    elif "coco" in model_name:
        # TODO
        __lowercase = 133
        __lowercase = """coco-panoptic-id2label.json"""
    elif "cityscapes" in model_name:
        # this should be ok
        __lowercase = 19
        __lowercase = """cityscapes-id2label.json"""
    elif "vistas" in model_name:
        # this should be ok
        __lowercase = 65
        __lowercase = """mapillary-vistas-id2label.json"""

    __lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
    __lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}

    return config


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    __lowercase = []
    # stem
    # fmt: off
    rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
    rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
    rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
    rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
            rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )

        if i < 3:
            rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
            rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
            rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
        rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
        rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )

    # FPN
    rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
    rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
    rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
    for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
        rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
        rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
        rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
        rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
        rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
        rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
    rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
    rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )

    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers ):
        # self-attention out projection
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
        # cross-attention out projection
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
        # MLP 1
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
        # MLP 2
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
        # layernorm 3 (final layernorm)
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
        rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )

    rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
    rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )

    # heads on top
    rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
    rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
    rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
    rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
    rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
    for i in range(3 ):
        rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
        rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
    # fmt: on

    return rename_keys


def snake_case ( lowerCamelCase , lowerCamelCase ,
lowerCamelCase ): '''simple docstring''' __lowercase = dct.pop(lowerCamelCase ) __lowercase = val def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __lowercase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' ) __lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[:dim, :] __lowercase = in_proj_bias[: dim] __lowercase = in_proj_weight[ dim : dim * 2, : ] __lowercase = in_proj_bias[ dim : dim * 2 ] __lowercase = in_proj_weight[ -dim :, : ] __lowercase = in_proj_bias[-dim :] # fmt: on def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' ) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[: hidden_size, :] __lowercase = in_proj_bias[:config.hidden_size] __lowercase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowercase = in_proj_bias[hidden_size : hidden_size * 2] __lowercase = in_proj_weight[-hidden_size :, :] __lowercase = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' ) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[: hidden_size, :] __lowercase = in_proj_bias[:config.hidden_size] __lowercase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowercase = in_proj_bias[hidden_size : hidden_size * 2] __lowercase = in_proj_weight[-hidden_size :, :] __lowercase = in_proj_bias[-hidden_size :] # fmt: on def snake_case ( ): '''simple docstring''' __lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return im @torch.no_grad() def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ): '''simple docstring''' __lowercase = get_maskformer_config(lowerCamelCase ) # load original state_dict with open(lowerCamelCase , """rb""" ) as f: __lowercase = pickle.load(lowerCamelCase ) __lowercase = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys __lowercase = create_rename_keys(lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase ) read_in_swin_q_k_v(lowerCamelCase , config.backbone_config ) read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase 
) # update to torch tensors for key, value in state_dict.items(): __lowercase = torch.from_numpy(lowerCamelCase ) # load 🤗 model __lowercase = MaskFormerForInstanceSegmentation(lowerCamelCase ) model.eval() for name, param in model.named_parameters(): print(lowerCamelCase , param.shape ) __lowercase , __lowercase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}' # verify results __lowercase = prepare_img() if "vistas" in model_name: __lowercase = 65 elif "cityscapes" in model_name: __lowercase = 65_535 else: __lowercase = 255 __lowercase = True if """ade""" in model_name else False __lowercase = MaskFormerImageProcessor(ignore_index=lowerCamelCase , reduce_labels=lowerCamelCase ) __lowercase = image_processor(lowerCamelCase , return_tensors="""pt""" ) __lowercase = model(**lowerCamelCase ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": __lowercase = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'Saving model and image processor to {pytorch_dump_folder_path}' ) Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) image_processor.save_pretrained(lowerCamelCase ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(F'nielsr/{model_name}' ) image_processor.push_to_hub(F'nielsr/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCamelCase : List[Any] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
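A typical invocation of the conversion script above (the script filename and local paths are illustrative; the flag names come from the argparse block in the script):

python convert_maskformer_checkpoint.py \
    --model_name maskformer-swin-tiny-ade \
    --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
    --pytorch_dump_folder_path ./maskformer-swin-tiny-ade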
import unittest

import numpy as np

from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )


class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = np.array(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
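Both test classes above are marked @slow, so they are skipped by default. In the transformers test suite such tests run only when the RUN_SLOW environment variable is set; a typical invocation (the test file path follows the current repository layout and may differ in older checkouts):

RUN_SLOW=1 python -m pytest tests/models/distilbert/test_modeling_flax_distilbert.py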
from math import sqrt


def is_prime(number: int) -> bool:
    """Check primality by trial division over candidates of the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2  # after 3, only odd numbers can be prime
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
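A quick sanity check for the two helpers above; the expected values are the well-known first primes, not anything derived from this file:

assert is_prime(2) and is_prime(97) and not is_prime(91)  # 91 = 7 * 13
assert solution(6) == 13  # the 6th prime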
from __future__ import annotations

import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to the sample (Euclidean distance)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # the winner is the weight vector with the smaller distance to the sample
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning weight vector j toward the sample by learning rate alpha."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training examples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization (n, C)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
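For intuition, a minimal standalone check of the winner rule above, with hand-picked weights (illustrative values only):

som = SelfOrganizingMap()
weights = [[0.0, 0.0], [1.0, 1.0]]
# [0.9, 0.8] is much closer to weights[1] than to weights[0]
assert som.get_winner(weights, [0.9, 0.8]) == 1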
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def snake_case ( lowerCamelCase ): '''simple docstring''' if isinstance(lowerCamelCase , collections.abc.Iterable ): return x return (x, x) @require_tf class __UpperCamelCase : def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : Dict ) -> Optional[int]: """simple docstring""" pass def _a ( self : Any ) -> Optional[Any]: """simple docstring""" pass def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str: """simple docstring""" __lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) 
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) __lowercase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ) __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) __lowercase = after_output[0].numpy() __lowercase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase , 1e-5 ) def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model( input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase ) __lowercase = output.vision_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase = to_atuple(vision_model.config.image_size ) __lowercase = to_atuple(vision_model.config.patch_size ) __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase = output.text_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]: """simple docstring""" __lowercase = np.abs((a - b) ).max() self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' 
) def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_lowerCAmelCase ) def _a ( self : int ) -> List[Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> int: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase ) def _a ( self : List[str] ) -> str: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_save_load(**_lowerCAmelCase ) def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_lowerCAmelCase ) @slow def _a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase , __lowercase = self.get_pretrained_model_and_inputs() __lowercase = model_a(**_lowerCAmelCase ) __lowercase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase ) __lowercase = model_a(**_lowerCAmelCase ) __lowercase = after_outputs[0].numpy() __lowercase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase , 1e-5 ) @require_tf class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): def _a ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) __lowercase = 13 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" __lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" ) __lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def _a ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase = TFViTModelTester(self ) __lowercase = TFBertModelTester(self ) __lowercase = vit_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): def _a ( self : Tuple ) -> Any: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" ) __lowercase = 13 
__lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase ) __lowercase = model( input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase ) __lowercase = output.vision_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __lowercase = to_atuple(vision_model.config.image_size ) __lowercase = to_atuple(vision_model.config.patch_size ) __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase = output.text_model_output.attentions self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int: """simple docstring""" __lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" ) __lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def _a ( self : Tuple ) -> str: """simple docstring""" __lowercase = TFDeiTModelTester(self ) __lowercase = TFRobertaModelTester(self ) __lowercase = vit_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): def _a ( self : int ) -> Union[str, Any]: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" ) __lowercase = 13 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {"""pixel_values""": pixel_values, 
"""input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" ) __lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" ) return vision_model, text_model def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase = TFCLIPVisionModelTester(self ) __lowercase = TFBertModelTester(self ) __lowercase = clip_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class __UpperCamelCase ( unittest.TestCase ): @slow def _a ( self : int ) -> Tuple: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_pretrained( """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase ) __lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __lowercase = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" ) __lowercase = model(**_lowerCAmelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowercase = np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the first digits of the given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over the given credit card number."""
    cc_number = credit_card_number
    total = 0
    start = len(cc_number) - 2
    for i in range(start, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print a validation message and return whether the number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
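The digit %= 10; digit += 1 shortcut in luhn_validation works because, for any doubled digit d between 10 and 18, the sum of its two decimal digits equals d % 10 + 1. A quick standalone check:

for doubled in range(10, 19):
    assert doubled % 10 + 1 == sum(int(c) for c in str(doubled))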
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the Bernstein basis values for parameter t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Evaluate the curve at parameter t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        """Plot the curve and its control points."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
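As a quick check of the evaluation above (control points chosen for illustration): a degree-1 Bezier curve is plain linear interpolation, so t = 0.5 must return the midpoint of the segment.

curve = BezierCurve([(0.0, 0.0), (2.0, 4.0)])
assert curve.bezier_curve_function(0.5) == (1.0, 2.0)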
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])

        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
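To preview the scene with a local manim installation, a command along these lines works (the module filename and the scene class name are illustrative; use whatever your file and class are actually called). The -pql flags preview the render at low quality for quick iteration:

manim -pql stage_1.py Stage1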
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class __UpperCamelCase : def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = is_training __lowercase = use_labels __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = encoder_stride __lowercase = num_attention_outputs __lowercase = embed_dim __lowercase = embed_dim + 1 __lowercase = resolution __lowercase = depths __lowercase = hidden_sizes __lowercase = dim __lowercase = mlp_expansion_ratio def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = self.get_config() return config, pixel_values, labels def _a ( self : Optional[Any] ) -> str: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def _a ( self 
: Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = TFEfficientFormerModel(config=_lowerCAmelCase ) __lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" __lowercase = self.type_sequence_label_size __lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase ) __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase = 1 __lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase ) __lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): __snake_case :Any = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) __snake_case :Any = ( { 'feature-extraction': TFEfficientFormerModel, 'image-classification': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) __snake_case :int = False __snake_case :Optional[int] = False __snake_case :int = False __snake_case :Any = False __snake_case :Any = False def _a ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase = TFEfficientFormerModelTester(self ) __lowercase = ConfigTester( self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 ) def _a ( self : Optional[int] ) -> int: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" ) def _a ( self : Optional[int] ) -> Any: """simple docstring""" pass @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" ) def _a ( self : int ) -> str: """simple docstring""" pass def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(_lowerCAmelCase ) __lowercase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> Dict: """simple docstring""" def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ): __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = 
outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowercase = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) if hasattr(self.model_tester , """encoder_seq_length""" ): __lowercase = self.model_tester.encoder_seq_length if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1: __lowercase = seq_length * self.model_tester.chunk_length else: __lowercase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __lowercase = outputs.decoder_hidden_states self.asseretIsInstance(_lowerCAmelCase , (list, tuple) ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict: """simple docstring""" __lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _a ( self : int ) -> int: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" ) def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase ) def _a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def _a ( self : List[str] ) -> List[Any]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def _a ( self : Any ) -> List[str]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase ) if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ): __lowercase = encoder_seq_length * self.model_tester.num_hashes 
for model_class in self.all_model_classes: __lowercase = True __lowercase = False __lowercase = True __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowercase = True __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def _a ( self : Dict ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __lowercase = model_class(_lowerCAmelCase ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __lowercase = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase ) for key, val in model.input_signature.items() if key in model.dummy_inputs } __lowercase = model(_lowerCAmelCase ) self.assertTrue(outputs_dict is not None ) def snake_case ( ): '''simple docstring''' __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def _a ( self : Optional[Any] ) -> Any: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" ) if is_vision_available() else None ) @slow def _a ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" ) # forward pass __lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase ) # verify the logits __lowercase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) __lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) ) @slow def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" __lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( """snap-research/efficientformer-l1-300""" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" ) # forward pass __lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase ) # verify the logits __lowercase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) __lowercase = 
tf.constant([-0.1_312, 0.4_353, -1.0_499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
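# --- Illustrative usage sketch (hedged): a minimal standalone version of the
# inference path the slow integration tests above exercise. The checkpoint name
# and the (1, 1000) logit shape are taken from the tests; everything else is
# stock transformers/TensorFlow API, not part of the test file itself.
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits  # tf.Tensor of shape (1, 1000)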
import json import logging import os import re import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import numpy as np import torch import torchaudio from packaging import version from torch import nn import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaProcessor, is_apex_available, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""): __UpperCamelCase : List[Any] = True from torch.cuda.amp import autocast __UpperCamelCase : Tuple = logging.getLogger(__name__) def snake_case ( lowerCamelCase=None , lowerCamelCase=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=lowerCamelCase ) @dataclass class __UpperCamelCase : __snake_case :str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) __snake_case :Optional[bool] = field( default=_lowerCAmelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} ) __snake_case :Optional[float] = field( default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} ) __snake_case :Optional[float] = field( default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} ) __snake_case :Optional[float] = field( default=0.1 , metadata={ 'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.' } , ) __snake_case :Optional[float] = field( default=0.1 , metadata={'help': 'The dropout probability for all 1D convolutional layers in feature extractor.'} , ) __snake_case :Optional[float] = field( default=0.05 , metadata={ 'help': ( 'Probability of each feature vector along the time axis to be chosen as the start of the vector' 'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature' 'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.' ) } , ) __snake_case :Optional[float] = field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} ) @dataclass class __UpperCamelCase : __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) __snake_case :Optional[str] = field( default='train+validation' , metadata={ 'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\'' } , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) __snake_case :Optional[int] = field( default=_lowerCAmelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , ) __snake_case :Optional[int] = field( default=_lowerCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.'
) } , ) __snake_case :Optional[int] = field( default=_lowerCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of validation examples to this ' 'value if set.' ) } , ) __snake_case :List[str] = list_field( default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , ) @dataclass class __UpperCamelCase : __snake_case :WavaVecaProcessor __snake_case :Union[bool, str] = True __snake_case :Optional[int] = None __snake_case :Optional[int] = None __snake_case :Optional[int] = None __snake_case :Optional[int] = None def __call__( self : Tuple , _lowerCAmelCase : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]: """simple docstring""" __lowercase = [{"""input_values""": feature["""input_values"""]} for feature in features] __lowercase = [{"""input_ids""": feature["""labels"""]} for feature in features] __lowercase = self.processor.pad( _lowerCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) __lowercase = self.processor.pad( labels=_lowerCAmelCase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="""pt""" , ) # replace padding with -100 to ignore loss correctly __lowercase = labels_batch["""input_ids"""].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 ) __lowercase = labels return batch class __UpperCamelCase ( _lowerCAmelCase ): def _a ( self : List[Any] , _lowerCAmelCase : nn.Module , _lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor: """simple docstring""" model.train() __lowercase = self._prepare_inputs(_lowerCAmelCase ) if self.use_amp: with autocast(): __lowercase = self.compute_loss(_lowerCAmelCase , _lowerCAmelCase ) else: __lowercase = self.compute_loss(_lowerCAmelCase , _lowerCAmelCase ) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": __lowercase = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": __lowercase = loss.sum() / (inputs["""labels"""] >= 0).sum() else: raise ValueError(F'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' ) if self.args.gradient_accumulation_steps > 1: __lowercase = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(_lowerCAmelCase ).backward() elif self.use_apex: with amp.scale_loss(_lowerCAmelCase , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(_lowerCAmelCase ) else: loss.backward() return loss.detach() def snake_case ( ): '''simple docstring''' __lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses() # Detecting last checkpoint.
__lowercase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowercase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: __lowercase = datasets.load_dataset( """common_voice""" , data_args.dataset_config_name , split=data_args.train_split_name ) __lowercase = datasets.load_dataset("""common_voice""" , data_args.dataset_config_name , split="""test""" ) # Create and save tokenizer __lowercase = F'[{"".join(data_args.chars_to_ignore )}]' def remove_special_characters(lowerCamelCase ): __lowercase = re.sub(lowerCamelCase , """""" , batch["""sentence"""] ).lower() + """ """ return batch __lowercase = train_dataset.map(lowerCamelCase , remove_columns=["""sentence"""] ) __lowercase = eval_dataset.map(lowerCamelCase , remove_columns=["""sentence"""] ) def extract_all_chars(lowerCamelCase ): __lowercase = """ """.join(batch["""text"""] ) __lowercase = list(set(lowerCamelCase ) ) return {"vocab": [vocab], "all_text": [all_text]} __lowercase = train_dataset.map( lowerCamelCase , batched=lowerCamelCase , batch_size=-1 , keep_in_memory=lowerCamelCase , remove_columns=train_dataset.column_names , ) __lowercase = train_dataset.map( lowerCamelCase , batched=lowerCamelCase , batch_size=-1 , keep_in_memory=lowerCamelCase , remove_columns=eval_dataset.column_names , ) __lowercase = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) ) __lowercase = {v: k for k, v in enumerate(lowerCamelCase )} __lowercase = vocab_dict[""" """] del vocab_dict[" "] __lowercase = len(lowerCamelCase ) __lowercase = len(lowerCamelCase ) with open("""vocab.json""" , """w""" ) as vocab_file: json.dump(lowerCamelCase , lowerCamelCase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowercase = WavaVecaCTCTokenizer( """vocab.json""" , unk_token="""[UNK]""" , pad_token="""[PAD]""" , word_delimiter_token="""|""" , ) __lowercase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=lowerCamelCase , return_attention_mask=lowerCamelCase ) __lowercase = WavaVecaProcessor(feature_extractor=lowerCamelCase , tokenizer=lowerCamelCase ) __lowercase = WavaVecaForCTC.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="""mean""" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , ) if data_args.max_train_samples is not None: __lowercase = min(len(lowerCamelCase ) , data_args.max_train_samples ) __lowercase = train_dataset.select(range(lowerCamelCase ) ) if data_args.max_val_samples is not None: __lowercase = eval_dataset.select(range(data_args.max_val_samples ) ) __lowercase = torchaudio.transforms.Resample(48_000 , 16_000 ) # Preprocessing the datasets. # We need to read the audio files as arrays and tokenize the targets. def speech_file_to_array_fn(lowerCamelCase ): __lowercase , __lowercase = torchaudio.load(batch["""path"""] ) __lowercase = resampler(lowerCamelCase ).squeeze().numpy() __lowercase = 16_000 __lowercase = batch["""text"""] return batch __lowercase = train_dataset.map( lowerCamelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) __lowercase = eval_dataset.map( lowerCamelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) def prepare_dataset(lowerCamelCase ): # check that all files have the correct sampling rate assert ( len(set(batch["""sampling_rate"""] ) ) == 1 ), F'Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'
__lowercase = processor( audio=batch["""speech"""] , text=batch["""target_text"""] , sampling_rate=batch["""sampling_rate"""][0] ) batch.update(lowerCamelCase ) return batch __lowercase = train_dataset.map( lowerCamelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , ) __lowercase = eval_dataset.map( lowerCamelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , ) # Metric __lowercase = datasets.load_metric("""wer""" ) def compute_metrics(lowerCamelCase ): __lowercase = pred.predictions __lowercase = np.argmax(lowerCamelCase , axis=-1 ) __lowercase = processor.tokenizer.pad_token_id __lowercase = processor.batch_decode(lowerCamelCase ) # we do not want to group tokens when computing the metrics __lowercase = processor.batch_decode(pred.label_ids , group_tokens=lowerCamelCase ) __lowercase = wer_metric.compute(predictions=lowerCamelCase , references=lowerCamelCase ) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() # Data collator __lowercase = DataCollatorCTCWithPadding(processor=lowerCamelCase , padding=lowerCamelCase ) # Initialize our Trainer __lowercase = CTCTrainer( model=lowerCamelCase , data_collator=lowerCamelCase , args=lowerCamelCase , compute_metrics=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , ) # Training if training_args.do_train: if last_checkpoint is not None: __lowercase = last_checkpoint elif os.path.isdir(model_args.model_name_or_path ): __lowercase = model_args.model_name_or_path else: __lowercase = None # Save the feature_extractor and the tokenizer if is_main_process(training_args.local_rank ): processor.save_pretrained(training_args.output_dir ) __lowercase = trainer.train(resume_from_checkpoint=lowerCamelCase ) trainer.save_model() __lowercase = train_result.metrics __lowercase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase ) ) __lowercase = min(lowerCamelCase , len(lowerCamelCase ) ) trainer.log_metrics("""train""" , lowerCamelCase ) trainer.save_metrics("""train""" , lowerCamelCase ) trainer.save_state() # Evaluation __lowercase = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __lowercase = trainer.evaluate() __lowercase = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCamelCase ) __lowercase = min(lowerCamelCase , len(lowerCamelCase ) ) trainer.log_metrics("""eval""" , lowerCamelCase ) trainer.save_metrics("""eval""" , lowerCamelCase ) return results if __name__ == "__main__": main()
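# --- Worked example (hedged): the label masking performed by the CTC data
# collator above, where padded label positions are replaced with -100 so the
# CTC loss ignores them. The concrete token ids below are made up for
# illustration; 0 is assumed to be the tokenizer's pad id.
import torch

label_ids = torch.tensor([[5, 9, 2, 0, 0]])
label_attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
masked_labels = label_ids.masked_fill(label_attention_mask.ne(1), -100)
# -> tensor([[   5,    9,    2, -100, -100]])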
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __UpperCamelCase : Tuple = 2 class __UpperCamelCase : def __init__( self : List[str] , *, # begin keyword-only arguments _lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]=None , ) -> Tuple: """simple docstring""" __lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos __lowercase = [] __lowercase = [] __lowercase = {} __lowercase = self.add_symbol(_lowerCAmelCase ) __lowercase = self.add_symbol(_lowerCAmelCase ) __lowercase = self.add_symbol(_lowerCAmelCase ) __lowercase = self.add_symbol(_lowerCAmelCase ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(_lowerCAmelCase ) __lowercase = len(self.symbols ) def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any: """simple docstring""" return self.indices == other.indices def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict: """simple docstring""" if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : str ) -> List[str]: """simple docstring""" return len(self.symbols ) def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]: """simple docstring""" return sym in self.indices @classmethod def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase = cls() d.add_from_file(_lowerCAmelCase ) return d def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]: """simple docstring""" if word in self.indices and not overwrite: __lowercase = self.indices[word] __lowercase = self.count[idx] + n return idx else: __lowercase = len(self.symbols ) __lowercase = idx self.symbols.append(_lowerCAmelCase ) self.count.append(_lowerCAmelCase ) return idx def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]: """simple docstring""" return 0 def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str: """simple docstring""" if isinstance(_lowerCAmelCase , _lowerCAmelCase ): try: with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(_lowerCAmelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) ) return __lowercase = f.readlines() __lowercase = self._load_meta(_lowerCAmelCase ) for line in lines[indices_start_line:]: try: __lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 ) if field == "#fairseq:overwrite": __lowercase = True __lowercase , __lowercase = line.rsplit(""" """ , 1 ) else: __lowercase = False __lowercase = int(_lowerCAmelCase ) __lowercase = line if word in self and not overwrite: raise RuntimeError( """Duplicate word found when loading Dictionary: '{}'. """ """Duplicate words can overwrite earlier ones by adding the """ """#fairseq:overwrite flag at the end of the corresponding row """ """in the dictionary file. 
If using the Camembert model, please """ """download an updated copy of the model file.""".format(_lowerCAmelCase ) ) self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase ) except ValueError: raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" ) def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() ) __lowercase = """<s> <pad> </s> <unk>""".split() # restore the special tokens for k in keep_keys: del da[F'{k}</w>'] __lowercase = d[k] # restore return da def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' if not os.path.exists(lowerCamelCase ): raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' ) os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase ) print(F'Writing results to {pytorch_dump_folder_path}' ) # handle various types of models __lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {checkpoint_file} does not exist!' ) __lowercase = torch.load(lowerCamelCase , map_location="""cpu""" ) __lowercase = chkpt["""cfg"""]["""model"""] # dicts __lowercase = os.path.join(lowerCamelCase , """dict.txt""" ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {dict_file} does not exist!' ) __lowercase = Dictionary.load(lowerCamelCase ) __lowercase = rewrite_dict_keys(src_dict.indices ) __lowercase = len(lowerCamelCase ) __lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] ) print(F'Generating {src_vocab_file} of {src_vocab_size} records' ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) ) # merges_file (bpecodes) __lowercase = os.path.join(lowerCamelCase , """bpecodes""" ) if not os.path.isfile(lowerCamelCase ): raise ValueError(F'path to the file {bpecodes_file} does not exist!' 
) __lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] ) shutil.copyfile(lowerCamelCase , lowerCamelCase ) # model config __lowercase = os.path.join(lowerCamelCase , """config.json""" ) __lowercase = { """activation_dropout""": args["""activation_dropout"""], """architectures""": ["""BioGptForCausalLM"""], """attention_probs_dropout_prob""": args["""attention_dropout"""], """bos_token_id""": 0, """eos_token_id""": 2, """hidden_act""": args["""activation_fn"""], """hidden_dropout_prob""": args["""dropout"""], """hidden_size""": args["""decoder_embed_dim"""], """initializer_range""": 0.02, """intermediate_size""": args["""decoder_ffn_embed_dim"""], """layer_norm_eps""": 1e-12, """layerdrop""": args["""decoder_layerdrop"""], """max_position_embeddings""": args["""max_target_positions"""], """model_type""": """biogpt""", """num_attention_heads""": args["""decoder_attention_heads"""], """num_hidden_layers""": args["""decoder_layers"""], """pad_token_id""": 1, """scale_embedding""": not args["""no_scale_embedding"""], """tie_word_embeddings""": args["""share_decoder_input_output_embed"""], """vocab_size""": src_vocab_size, } # good hparam defaults to start with print(F'Generating {biogpt_model_config_file}' ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) ) # tokenizer config __lowercase = os.path.join(lowerCamelCase , lowerCamelCase ) __lowercase = { """bos_token""": """<s>""", """eos_token""": """</s>""", """model_max_length""": 1_024, """pad_token""": """<pad>""", """special_tokens_map_file""": None, """tokenizer_class""": """BioGptTokenizer""", """unk_token""": """<unk>""", } print(F'Generating {biogpt_tokenizer_config_file}' ) with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) ) # model __lowercase = chkpt["""model"""] # remove unneeded keys __lowercase = [ """decoder.version""", ] for k in ignore_keys: model_state_dict.pop(lowerCamelCase , lowerCamelCase ) __lowercase = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("""output_projection.weight""" ): __lowercase = model_state_dict.pop(lowerCamelCase ) else: __lowercase = model_state_dict.pop(lowerCamelCase ) __lowercase = BioGptConfig.from_pretrained(lowerCamelCase ) __lowercase = BioGptForCausalLM(lowerCamelCase ) # check that it loads ok model_new.load_state_dict(lowerCamelCase ) # save __lowercase = os.path.join(lowerCamelCase , lowerCamelCase ) print(F'Generating {pytorch_weights_dump_path}' ) torch.save(lowerCamelCase , lowerCamelCase ) print("""Conversion is done!""" ) if __name__ == "__main__": __UpperCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--biogpt_checkpoint_path""", default=None, type=str, required=True, help=( """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,""" """ bpecodes, etc.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __UpperCamelCase : Optional[Any] = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
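# --- Worked example (hedged): how the dict-rewriting helper above (named
# rewrite_dict_keys in the upstream script) converts fairseq BPE vocabulary
# keys. A trailing "@@" marks a word-internal piece and is stripped, while
# word-final pieces get a "</w>" suffix appended. The toy entries are made up.
import re

src = {"hel@@": 10, "lo": 11}
dst = dict(
    (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
    for k, v in src.items()
)
# -> {"hel": 10, "lo</w>": 11}; the four special tokens are restored afterwards.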
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __UpperCamelCase : def __init__( self : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : int=10 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Any=32 * 4 , _lowerCAmelCase : str=32 * 6 , _lowerCAmelCase : str=4 , _lowerCAmelCase : Union[str, Any]=32 , ) -> Any: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = is_training __lowercase = use_auxiliary_loss __lowercase = num_queries __lowercase = num_channels __lowercase = min_size __lowercase = max_size __lowercase = num_labels __lowercase = mask_feature_size def _a ( self : int ) -> Tuple: """simple docstring""" __lowercase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _lowerCAmelCase ) __lowercase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCAmelCase ) __lowercase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCAmelCase ) > 0.5 ).float() __lowercase = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCAmelCase ) > 0.5).long() __lowercase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _a ( self : Tuple ) -> Tuple: """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def _a ( self : Tuple ) -> int: """simple docstring""" __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs() __lowercase = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def _a ( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ) -> int: """simple docstring""" __lowercase = output.encoder_hidden_states __lowercase = output.pixel_decoder_hidden_states __lowercase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_lowerCAmelCase ) , config.decoder_config.decoder_layers ) def _a ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any]=False ) -> Tuple: """simple docstring""" with torch.no_grad(): __lowercase = MaskFormerModel(config=_lowerCAmelCase ) 
model.to(_lowerCAmelCase ) model.eval() __lowercase = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase ) __lowercase = model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden states exist self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_lowerCAmelCase , _lowerCAmelCase ) def _a ( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : int ) -> int: """simple docstring""" __lowercase = MaskFormerForInstanceSegmentation(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() def comm_check_on_output(_lowerCAmelCase : List[Any] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __lowercase = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase ) __lowercase = model(_lowerCAmelCase ) comm_check_on_output(_lowerCAmelCase ) __lowercase = model( pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ) comm_check_on_output(_lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): __snake_case :str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __snake_case :Tuple = ( {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __snake_case :Tuple = False __snake_case :List[str] = False __snake_case :List[str] = False __snake_case :List[str] = False def _a ( self : Dict ) -> Dict: """simple docstring""" __lowercase = MaskFormerModelTester(self ) __lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def _a ( self : str ) -> Optional[Any]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase ) def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_lowerCAmelCase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def _a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def _a ( self : List[str] ) -> Dict: """simple docstring""" pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def _a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def _a ( self : Any ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _a ( self : Optional[Any] ) -> Any: """simple docstring""" pass def _a ( self : int ) -> str: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(_lowerCAmelCase ) __lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) @slow def _a ( self : Any ) -> Any: """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: __lowercase = MaskFormerModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def _a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = (self.model_tester.min_size,) * 2 __lowercase = { """pixel_values""": torch.randn((2, 3, *size) , device=_lowerCAmelCase ), """mask_labels""": torch.randn((2, 10, *size) , device=_lowerCAmelCase ), """class_labels""": torch.zeros(2 , 10 , device=_lowerCAmelCase ).long(), } __lowercase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_lowerCAmelCase ) __lowercase = model(**_lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def _a ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> Any: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(_lowerCAmelCase ).to(_lowerCAmelCase ) __lowercase = model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def _a ( self : Dict ) -> Dict: """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss __lowercase = self.all_model_classes[1] __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs() __lowercase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.train() __lowercase = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss loss.backward() def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.all_model_classes[1] __lowercase , __lowercase , __lowercase , __lowercase , 
__lowercase = self.model_tester.prepare_config_and_inputs() __lowercase = True __lowercase = True __lowercase = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.train() __lowercase = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ) __lowercase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __lowercase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't __lowercase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __lowercase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __UpperCamelCase : Optional[Any] = 1e-4 def snake_case ( ): '''simple docstring''' __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class __UpperCamelCase ( unittest.TestCase ): @cached_property def _a ( self : Dict ) -> int: """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def _a ( self : str ) -> str: """simple docstring""" __lowercase = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(_lowerCAmelCase ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase ) __lowercase = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): __lowercase = model(**_lowerCAmelCase ) __lowercase = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(_lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) ) __lowercase = torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(_lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) ) __lowercase = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(_lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) ) def _a ( self : int ) -> List[str]: """simple docstring""" __lowercase = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(_lowerCAmelCase ) .eval() ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase ) __lowercase = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): __lowercase = model(**_lowerCAmelCase ) # masks_queries_logits
__lowercase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) __lowercase = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] __lowercase = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) ) # class_queries_logits __lowercase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) __lowercase = torch.tensor( [ [1.6512e00, -5.2572e00, -3.3519e00], [3.6169e-02, -5.9025e00, -2.9313e00], [1.0766e-04, -7.7630e00, -5.1263e00], ] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) ) def _a ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(_lowerCAmelCase ) .eval() ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase ) __lowercase = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): __lowercase = model(**_lowerCAmelCase ) # masks_queries_logits __lowercase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) __lowercase = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] __lowercase = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) ) # class_queries_logits __lowercase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) __lowercase = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) ) def _a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __lowercase = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(_lowerCAmelCase ) .eval() ) __lowercase = self.default_image_processor __lowercase = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , ) __lowercase = inputs["""pixel_values"""].to(_lowerCAmelCase ) __lowercase = [el.to(_lowerCAmelCase ) for el in inputs["""mask_labels"""]] __lowercase = [el.to(_lowerCAmelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): __lowercase = model(**_lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
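# --- Illustrative usage sketch (hedged): the inference path the MaskFormer
# integration tests above exercise. The checkpoint name and the output shapes
# are taken from those tests; the rest is stock transformers API.
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# masks are predicted at 1/4 of the padded input resolution; classes include one extra "null" class
masks_queries_logits = outputs.masks_queries_logits    # (1, num_queries, H // 4, W // 4)
class_queries_logits = outputs.class_queries_logits    # (1, num_queries, num_labels + 1)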
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): __snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str: """simple docstring""" __lowercase = np.random.RandomState(_lowerCAmelCase ) __lowercase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self : int ) -> List[Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Tuple ) -> int: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Tuple ) 
-> Union[str, Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs["""prompt"""]] # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs.pop("""prompt""" )] __lowercase = pipe.tokenizer( _lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , ) __lowercase = text_inputs["""input_ids"""] __lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] __lowercase = prompt_embeds # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def _a ( self : int ) -> str: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = 3 * ["""this is a negative prompt"""] __lowercase = negative_prompt __lowercase = 3 * [inputs["""prompt"""]] # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs.pop("""prompt""" )] __lowercase = [] for p in [prompt, negative_prompt]: __lowercase = pipe.tokenizer( _lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , ) __lowercase = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) __lowercase , __lowercase = embeds # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): @property def _a ( self : Dict ) 
-> str: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __lowercase = ort.SessionOptions() __lowercase = False return options def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """A painting of a squirrel eating a burger""" np.random.seed(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Tuple ) -> Any: """simple docstring""" __lowercase = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """open neural network exchange""" __lowercase = np.random.RandomState(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Dict ) -> Dict: """simple docstring""" __lowercase = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """open neural network exchange""" __lowercase = np.random.RandomState(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : str ) -> List[str]: """simple docstring""" __lowercase = 0 def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None: __lowercase = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) __lowercase = latents[0, -3:, -3:, -1] 
__lowercase = np.array( [-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) __lowercase = latents[0, -3:, -3:, -1] __lowercase = np.array( [-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 __lowercase = False __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """Andromeda galaxy in a bottle""" __lowercase = np.random.RandomState(0 ) pipe( prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert pipe.safety_checker is None __lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_lowerCAmelCase ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase ) # sanity check that the pipeline still works assert pipe.safety_checker is None __lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None
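# --- Illustrative usage sketch (hedged): running the ONNX Stable Diffusion
# pipeline tested above on CPU. The checkpoint, revision, provider, and the
# numpy RandomState generator all mirror the tests; step count is arbitrary.
import numpy as np
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
)
generator = np.random.RandomState(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    num_inference_steps=10,
    guidance_scale=7.5,
    generator=generator,
).images[0]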
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise constant schedule, e.g. "1:10,0.1:20,0.01:30,0.005" (multiplier:step boundary, ..., final multiplier)."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1) -> LambdaLR:
    """Linear warmup followed by a linear decay to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
) -> LambdaLR:
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
) -> LambdaLR:
    """Cosine schedule with several hard restarts, preceded by a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
) -> LambdaLR:
    """Polynomial decay from the optimizer's initial lr down to `lr_end`, preceded by a linear warmup."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified API to get any scheduler from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
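A minimal usage sketch for `get_scheduler` (not part of the original module; the toy model, learning rate, and step counts are arbitrary, and it assumes the functions above are in scope):

import torch

model = torch.nn.Linear(4, 2)                               # toy model, illustration only
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
for _ in range(1000):
    optimizer.step()     # loss computation / backward omitted for brevity
    lr_scheduler.step()  # advance the schedule once per optimizer step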
53
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
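An illustrative round trip for the cipher above (key and message chosen arbitrarily; the expected outputs match the standard doctests for this algorithm):

cipher_map = create_cipher_map("Goodbye!!")
assert encipher("Hello World!!", cipher_map) == "CYJJM VMQJB!!"
assert decipher("CYJJM VMQJB!!", cipher_map) == "HELLO WORLD!!"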
53
1
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
53
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
53
1
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
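An illustrative invocation of the conversion script above; the script file name and output directory are placeholders, not taken from the original file:

# python convert_roberta_prelayernorm_checkpoint.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta-prelayernorm-converted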
53
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
53
1
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
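A hedged usage sketch for the converter above; the checkpoint path and the Flax model instance are placeholders, not taken from the original file:

# import torch
# pt_state_dict = torch.load("pytorch_model.bin")  # hypothetical PyTorch checkpoint
# flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
# flax_model.save_pretrained("./flax-dump", params=flax_params)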
53
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
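A short usage sketch for this (deprecated) dataset class; the tokenizer checkpoint and data directory are placeholders:

# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
# args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
# train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")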
53
1
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
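A small, self-contained usage example for the two functions above (dataset contents are made up for illustration):

from datasets import Dataset

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})
mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
combined = concatenate_datasets([d1, d2])  # 6 rows, d1 followed by d2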
53
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
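An illustrative command line for this legacy fine-tuning script (file and directory names are placeholders):

# python run_language_modeling.py \
#     --model_name_or_path roberta-base --mlm \
#     --train_data_file train.txt --eval_data_file eval.txt \
#     --do_train --do_eval --output_dir ./lm-finetuned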
53
1
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
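This example is normally started through the Accelerate launcher; the script file name below is illustrative:

# accelerate launch multi_process_metrics.py --mixed_precision fp16
# accelerate launch multi_process_metrics.py --cpu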
53
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
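A worked example of the check above (values chosen for illustration): the sides [6, 10, 5] form a valid polygon because the longest side, 10, is shorter than the sum of the others, 11.

assert check_polygon([6, 10, 5]) is True
assert check_polygon([3, 7, 13, 2]) is False  # 13 >= 3 + 7 + 2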
53
1
53
from __future__ import annotations def snake_case ( lowerCamelCase ): '''simple docstring''' if not nums: return 0 __lowercase = nums[0] __lowercase = 0 for num in nums[1:]: __lowercase , __lowercase = ( max_excluding + num, max(lowerCamelCase , lowerCamelCase ), ) return max(lowerCamelCase , lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
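# A hedged restatement of the dynamic programme above (the classic "house
# robber" recurrence), with the masked names spelled out: at each number we
# either add it to the best sum that excluded its predecessor, or carry the
# previous best forward.
def max_non_adjacent_sum(nums):
    if not nums:
        return 0
    including, excluding = nums[0], 0
    for num in nums[1:]:
        including, excluding = excluding + num, max(including, excluding)
    return max(including, excluding)

print(max_non_adjacent_sum([1, 2, 4, 5]))     # 7  (2 + 5)
print(max_non_adjacent_sum([3, 7, 4, 6, 5]))  # 13 (7 + 6)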
53
1
import math import qiskit def snake_case ( lowerCamelCase = 1 , lowerCamelCase = 1 , lowerCamelCase = 1 ): '''simple docstring''' if ( isinstance(lowerCamelCase , lowerCamelCase ) or isinstance(lowerCamelCase , lowerCamelCase ) or isinstance(lowerCamelCase , lowerCamelCase ) ): raise TypeError("""inputs must be integers.""" ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError("""inputs must be positive.""" ) if ( (math.floor(lowerCamelCase ) != input_a) or (math.floor(lowerCamelCase ) != input_a) or (math.floor(lowerCamelCase ) != carry_in) ): raise ValueError("""inputs must be exact integers.""" ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError("""inputs must be less or equal to 2.""" ) # build registers __lowercase = qiskit.QuantumRegister(4 , """qr""" ) __lowercase = qiskit.ClassicalRegister(2 , """cr""" ) # list the entries __lowercase = [input_a, input_a, carry_in] __lowercase = qiskit.QuantumCircuit(lowerCamelCase , lowerCamelCase ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(lowerCamelCase ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(lowerCamelCase ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(lowerCamelCase ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , lowerCamelCase ) # measure the last two qbits __lowercase = qiskit.Aer.get_backend("""aer_simulator""" ) __lowercase = qiskit.execute(lowerCamelCase , lowerCamelCase , shots=1_000 ) return job.result().get_counts(lowerCamelCase ) if __name__ == "__main__": print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
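# A classical cross-check for the circuit above: the two measured bits encode
# (sum, carry_out) of a full adder. This sketch assumes definite 0/1 inputs;
# the circuit's Hadamard branch covers the superposition case (input == 2).
def classical_full_adder(bit_a, bit_b, carry_in):
    total = bit_a + bit_b + carry_in
    return total % 2, total // 2  # (sum bit, carry-out bit)

for a in (0, 1):
    for b in (0, 1):
        for c in (0, 1):
            s, carry = classical_full_adder(a, b, c)
            print(f"{a}+{b}+{c} -> sum={s}, carry={carry}")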
53
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : str = logging.get_logger(__name__) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = RobertaPreLayerNormConfig.from_pretrained( lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] ) # convert state_dict __lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) ) __lowercase = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwriting 'roberta' if tensor_key.startswith("""roberta.""" ): __lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ): continue __lowercase = tensor_value __lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) # convert tokenizer __lowercase = AutoTokenizer.from_pretrained(lowerCamelCase ) tokenizer.save_pretrained(lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint-repo""", default=None, type=str, required=True, help="""Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __UpperCamelCase : Dict = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
53
1
def snake_case ( lowerCamelCase = 10 ): '''simple docstring''' if not isinstance(lowerCamelCase , lowerCamelCase ) or n < 0: raise ValueError("""Invalid input""" ) __lowercase = 10**n __lowercase = 28_433 * (pow(2 , 7_830_457 , lowerCamelCase )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F'''{solution(10) = }''')
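# Why the three-argument pow above suffices: only the last n digits are asked
# for, and modular reduction distributes over multiplication, so the
# astronomically large power 2**7_830_457 never has to be materialised.
# A standalone sketch for the default n = 10:
modulus = 10**10
last_digits = (28_433 * pow(2, 7_830_457, modulus) + 1) % modulus
print(last_digits)  # 8739992577, the known answer to Project Euler 97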
53
import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if (ksize % 2) == 0: __lowercase = ksize + 1 __lowercase = np.zeros((ksize, ksize) , dtype=np.floataa ) # fill in each kernel value for y in range(lowerCamelCase ): for x in range(lowerCamelCase ): # distance from center __lowercase = x - ksize // 2 __lowercase = y - ksize // 2 # degrees to radians __lowercase = theta / 180 * np.pi __lowercase = np.cos(_theta ) __lowercase = np.sin(_theta ) # get kernel x __lowercase = cos_theta * px + sin_theta * py # get kernel y __lowercase = -sin_theta * px + cos_theta * py # fill kernel __lowercase = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image __UpperCamelCase : List[Any] = imread("""../image_data/lena.jpg""") # convert the image to grayscale __UpperCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY) # apply multiple kernels to detect edges __UpperCamelCase : Union[str, Any] = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 120, 150]: __UpperCamelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) __UpperCamelCase : List[str] = out / out.max() * 255 __UpperCamelCase : List[str] = out.astype(np.uinta) imshow("""Original""", gray) imshow("""Gabor filter with 20x20 mask and 6 directions""", out) waitKey(0)
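# A standalone restatement of the Gabor kernel maths above with the masked
# names spelled out (ksize, sigma, theta, lambd, gamma, psi), plus a quick
# sanity check: the centre element is exp(0) * cos(psi), exactly 1.0 for psi == 0.
import numpy as np

def gabor_kernel(ksize, sigma, theta, lambd, gamma, psi):
    if ksize % 2 == 0:
        ksize += 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
    for y in range(ksize):
        for x in range(ksize):
            px, py = x - ksize // 2, y - ksize // 2  # offsets from the centre
            _theta = np.deg2rad(theta)
            _x = np.cos(_theta) * px + np.sin(_theta) * py
            _y = -np.sin(_theta) * px + np.cos(_theta) * py
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(
                2 * np.pi * _x / lambd + psi
            )
    return gabor

kernel = gabor_kernel(9, 8, 0, 10, 0, 0)
print(kernel.shape)  # (9, 9)
print(kernel[4, 4])  # 1.0 at the centre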
53
1
def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = [0 for i in range(len(lowerCamelCase ) )] # initialize the interval's left and right pointers __lowercase , __lowercase = 0, 0 for i in range(1 , len(lowerCamelCase ) ): # case when the current index is inside the interval if i <= right_pointer: __lowercase = min(right_pointer - i + 1 , z_result[i - left_pointer] ) __lowercase = min_edge while go_next(lowerCamelCase , lowerCamelCase , lowerCamelCase ): z_result[i] += 1 # if the new index's result extends the interval to the right, # we have to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: __lowercase , __lowercase = i, i + z_result[i] - 1 return z_result def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' return i + z_result[i] < len(lowerCamelCase ) and s[z_result[i]] == s[i + z_result[i]] def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = 0 # concatenate 'pattern' and 'input_str' and call z_function # with the concatenated string __lowercase = z_function(pattern + input_str ) for val in z_result: # if the value is greater than or equal to the length of the pattern string, # this index is the starting position of a substring # equal to the pattern string if val >= len(lowerCamelCase ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
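# A naive O(n^2) Z-function, handy for sanity-checking the linear-time version
# above: z[i] is the length of the longest common prefix of s and s[i:].
def naive_z(s):
    z = [0] * len(s)
    for i in range(1, len(s)):
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
    return z

print(naive_z("abacaba"))  # [0, 0, 1, 0, 3, 0, 1]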
53
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = set() __lowercase = [] def parse_line(lowerCamelCase ): for line in fp: if isinstance(lowerCamelCase , lowerCamelCase ): __lowercase = line.decode("""UTF-8""" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(""" """ ): # process a single warning and move it to `selected_warnings`. if len(lowerCamelCase ) > 0: __lowercase = """\n""".join(lowerCamelCase ) # Only keep the warnings specified in `targets` if any(F': {x}: ' in warning for x in targets ): selected_warnings.add(lowerCamelCase ) buffer.clear() continue else: __lowercase = line.strip() buffer.append(lowerCamelCase ) if from_gh: for filename in os.listdir(lowerCamelCase ): __lowercase = os.path.join(lowerCamelCase , lowerCamelCase ) if not os.path.isdir(lowerCamelCase ): # read the file if filename != "warnings.txt": continue with open(lowerCamelCase ) as fp: parse_line(lowerCamelCase ) else: try: with zipfile.ZipFile(lowerCamelCase ) as z: for filename in z.namelist(): if not os.path.isdir(lowerCamelCase ): # read the file if filename != "warnings.txt": continue with z.open(lowerCamelCase ) as fp: parse_line(lowerCamelCase ) except Exception: logger.warning( F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' ) return selected_warnings def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = set() __lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) ) return selected_warnings if __name__ == "__main__": def snake_case ( lowerCamelCase ): '''simple docstring''' return values.split(""",""" ) __UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) __UpperCamelCase : List[str] = parser.parse_args() __UpperCamelCase : Union[str, Any] = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links __UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in 
enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts __UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets) __UpperCamelCase : Any = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
53
1
from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch("""socket.socket""" ) @patch("""builtins.open""" ) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = Mock() __lowercase = conn, Mock() __lowercase = iter([1, None] ) __lowercase = lambda lowerCamelCase : next(lowerCamelCase ) # ===== invoke ===== send_file(filename="""mytext.txt""" , testing=lowerCamelCase ) # ===== assertions ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
53
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = [ """MRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MraForMaskedLM""", """MraForMultipleChoice""", """MraForQuestionAnswering""", """MraForSequenceClassification""", """MraForTokenClassification""", """MraLayer""", """MraModel""", """MraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
53
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : Optional[Any] = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = ["""YolosFeatureExtractor"""] __UpperCamelCase : int = ["""YolosImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""", """YolosForObjectDetection""", """YolosModel""", """YolosPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys __UpperCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
53
def snake_case ( lowerCamelCase ): '''simple docstring''' if not isinstance(lowerCamelCase , lowerCamelCase ): raise ValueError("""check_bouncy() accepts only integer arguments""" ) __lowercase = str(lowerCamelCase ) __lowercase = """""".join(sorted(lowerCamelCase ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def snake_case ( lowerCamelCase = 99 ): '''simple docstring''' if not 0 < percent < 100: raise ValueError("""solution() only accepts values from 0 to 100""" ) __lowercase = 0 __lowercase = 1 while True: if check_bouncy(lowerCamelCase ): bouncy_num += 1 if (bouncy_num / num) * 100 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(F'''{solution(99)}''')
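# Quick sanity check of the bouncy predicate above: a number is bouncy when its
# digits are neither monotonically non-decreasing nor non-increasing, and 101
# is the smallest bouncy number.
def is_bouncy(number):
    digits = str(number)
    ascending = "".join(sorted(digits))
    return ascending != digits and ascending[::-1] != digits

print([n for n in range(100, 110) if is_bouncy(n)])  # [101, 102, ..., 109]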
53
1
def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = """""" for i in table: res += inp[i - 1] return res def snake_case ( lowerCamelCase ): '''simple docstring''' return data[1:] + data[0] def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = """""" for i in range(len(lowerCamelCase ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = int("""0b""" + data[0] + data[-1] , 2 ) __lowercase = int("""0b""" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = message[:4] __lowercase = message[4:] __lowercase = apply_table(lowerCamelCase , lowerCamelCase ) __lowercase = xor(lowerCamelCase , lowerCamelCase ) __lowercase = apply_sbox(lowerCamelCase , temp[:4] ) # noqa: E741 __lowercase = apply_sbox(lowerCamelCase , temp[4:] ) __lowercase = """0""" * (2 - len(lowerCamelCase )) + l # noqa: E741 __lowercase = """0""" * (2 - len(lowerCamelCase )) + r __lowercase = apply_table(l + r , lowerCamelCase ) __lowercase = xor(lowerCamelCase , lowerCamelCase ) return temp + right if __name__ == "__main__": __UpperCamelCase : Union[str, Any] = input("""Enter 10 bit key: """) __UpperCamelCase : str = input("""Enter 8 bit message: """) __UpperCamelCase : List[str] = [6, 3, 7, 4, 8, 5, 10, 9] __UpperCamelCase : Tuple = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] __UpperCamelCase : Optional[Any] = [2, 4, 3, 1] __UpperCamelCase : int = [2, 6, 3, 1, 4, 8, 5, 7] __UpperCamelCase : int = [4, 1, 3, 5, 7, 2, 8, 6] __UpperCamelCase : Dict = [4, 1, 2, 3, 2, 3, 4, 1] __UpperCamelCase : Tuple = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] __UpperCamelCase : str = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation __UpperCamelCase : Dict = apply_table(key, paa_table) __UpperCamelCase : str = temp[:5] __UpperCamelCase : str = temp[5:] __UpperCamelCase : Tuple = left_shift(left) __UpperCamelCase : Union[str, Any] = left_shift(right) __UpperCamelCase : Union[str, Any] = apply_table(left + right, pa_table) __UpperCamelCase : Union[str, Any] = left_shift(left) __UpperCamelCase : int = left_shift(right) __UpperCamelCase : Any = left_shift(left) __UpperCamelCase : List[str] = left_shift(right) __UpperCamelCase : Optional[Any] = apply_table(left + right, pa_table) # encryption __UpperCamelCase : int = apply_table(message, IP) __UpperCamelCase : Dict = function(expansion, sa, sa, keya, temp) __UpperCamelCase : Dict = temp[4:] + temp[:4] __UpperCamelCase : Dict = function(expansion, sa, sa, keya, temp) __UpperCamelCase : Union[str, Any] = apply_table(temp, IP_inv) print("""Cipher text is:""", CT) # decryption __UpperCamelCase : Optional[Any] = apply_table(CT, IP) __UpperCamelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp) __UpperCamelCase : str = temp[4:] + temp[:4] __UpperCamelCase : Tuple = function(expansion, sa, sa, keya, temp) __UpperCamelCase : Tuple = apply_table(temp, IP_inv) print("""Plain text after decrypting is:""", PT)
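# Small standalone checks of the bit-string helpers used by the S-DES round
# above (masked names spelled out): permutation tables are 1-indexed, and xor
# combines two equal-length bit strings position by position.
def apply_table(inp, table):
    return "".join(inp[i - 1] for i in table)

def xor(a, b):
    return "".join("0" if x == y else "1" for x, y in zip(a, b))

print(apply_table("10110", [2, 4, 1]))  # '011'
print(xor("1010", "0110"))              # '1100'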
53
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __UpperCamelCase : Tuple = { """configuration_swiftformer""": [ """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwiftFormerConfig""", """SwiftFormerOnnxConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = [ """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwiftFormerForImageClassification""", """SwiftFormerModel""", """SwiftFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
53
1
import itertools import math def snake_case ( lowerCamelCase ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers greater than 3 are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case ( ): '''simple docstring''' __lowercase = 2 while True: if is_prime(lowerCamelCase ): yield num num += 1 def snake_case ( lowerCamelCase = 10_001 ): '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , lowerCamelCase ) ) if __name__ == "__main__": print(F'''{solution() = }''')
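# Usage sketch of the islice-based lookup above: islice(gen, nth - 1, nth)
# skips the first nth - 1 primes and yields exactly the nth one. The generator
# here is a deliberately simple trial-division version for illustration.
import itertools

def primes():
    candidate = 2
    while True:
        if all(candidate % p for p in range(2, int(candidate**0.5) + 1)):
            yield candidate
        candidate += 1

print(next(itertools.islice(primes(), 5, 6)))  # 13, the 6th prime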
53
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): def _a ( self : Union[str, Any] ) -> Any: """simple docstring""" super().tearDown() gc.collect() def _a ( self : List[str] ) -> int: """simple docstring""" __lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) __lowercase = """A painting of a squirrel eating a burger""" __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase ) __lowercase = replicate(_lowerCAmelCase ) __lowercase = shard(_lowerCAmelCase ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() ) __lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __lowercase = images[0, 253:256, 253:256, -1] __lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _a ( self : str ) -> List[Any]: """simple docstring""" __lowercase = """stabilityai/stable-diffusion-2""" __lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" ) __lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained( _lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , ) __lowercase = scheduler_params __lowercase = """A painting of a squirrel eating a burger""" __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase ) __lowercase = replicate(_lowerCAmelCase ) __lowercase = shard(_lowerCAmelCase ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() ) __lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __lowercase = images[0, 253:256, 253:256, -1] __lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
53
1
import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __UpperCamelCase : Optional[int] = datasets.logging.get_logger(__name__) __UpperCamelCase : int = """\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } """ __UpperCamelCase : List[str] = """\ BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project's README at https://github.com/google-research/bleurt#readme for more information. """ __UpperCamelCase : Optional[Any] = """ BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: 'scores': List of scores. Examples: >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> bleurt = datasets.load_metric(\"bleurt\") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results[\"scores\"]]) [1.03, 1.04] """ __UpperCamelCase : str = { """bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""", """bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""", """bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""", """bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""", """bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""", """bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""", """BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""", """BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""", """BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""", """BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def _a ( self : List[str] ) -> Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , ) def _a ( self : Tuple , _lowerCAmelCase : List[str] ) -> Any: """simple docstring""" if self.config_name == "default": logger.warning( """Using default BLEURT-Base checkpoint for sequence maximum length 128. 
""" """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" ) __lowercase = """bleurt-base-128""" if self.config_name.lower() in CHECKPOINT_URLS: __lowercase = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: __lowercase = self.config_name.upper() else: raise KeyError( F'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' ) # download the model checkpoint specified by self.config_name and set up the scorer __lowercase = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) __lowercase = score.BleurtScorer(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) ) def _a ( self : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] ) -> str: """simple docstring""" __lowercase = self.scorer.score(references=_lowerCAmelCase , candidates=_lowerCAmelCase ) return {"scores": scores}
53
import heapq import sys import numpy as np __UpperCamelCase : List[str] = tuple[int, int] class __UpperCamelCase : def __init__( self : Optional[int] ) -> Dict: """simple docstring""" __lowercase = [] __lowercase = set() def _a ( self : int ) -> List[Any]: """simple docstring""" if not self.empty(): return self.elements[0][0] else: return float("""inf""" ) def _a ( self : Dict ) -> Optional[Any]: """simple docstring""" return len(self.elements ) == 0 def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(_lowerCAmelCase ) else: # update # print("update", item) __lowercase = [] ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" if item in self.set: self.set.remove(_lowerCAmelCase ) __lowercase = [] ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def _a ( self : Any ) -> List[Any]: """simple docstring""" return self.elements[0][1] def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" ((__lowercase) , (__lowercase)) = heapq.heappop(self.elements ) self.set.remove(_lowerCAmelCase ) return (priority, item) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = np.array(lowerCamelCase ) __lowercase = np.array(lowerCamelCase ) return np.linalg.norm(a - b ) def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase ) return ans def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = np.chararray((n, n) ) for i in range(lowerCamelCase ): for j in range(lowerCamelCase ): __lowercase = """*""" for i in range(lowerCamelCase ): for j in range(lowerCamelCase ): if (j, (n - 1) - i) in blocks: __lowercase = """#""" __lowercase = """-""" __lowercase = back_pointer[goal] while x != start: ((__lowercase) , (__lowercase)) = x # print(x) __lowercase = """-""" __lowercase = back_pointer[x] __lowercase = """-""" for i in range(lowerCamelCase ): for j in range(lowerCamelCase ): if (i, j) == (0, n - 1): print(grid[i][j] , end=""" """ ) print("""<-- End position""" , end=""" """ ) else: print(grid[i][j] , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) print("""PATH TAKEN BY THE ALGORITHM IS:-""" ) __lowercase = back_pointer[goal] while x != start: print(lowerCamelCase , end=""" """ ) __lowercase = back_pointer[x] print(lowerCamelCase ) sys.exit() def snake_case ( lowerCamelCase ): '''simple docstring''' if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: 
return False return True def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ): '''simple docstring''' for itera in range(lowerCamelCase ): open_list[itera].remove_element(lowerCamelCase ) # print("s", s) # print("j", j) ((__lowercase) , (__lowercase)) = s __lowercase = (x - 1, y) __lowercase = (x + 1, y) __lowercase = (x, y + 1) __lowercase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(lowerCamelCase ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(lowerCamelCase ) __lowercase = -1 __lowercase = float("""inf""" ) if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1: __lowercase = g_function[s] + 1 __lowercase = s if neighbours not in close_list_anchor: open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) ) if neighbours not in close_list_inad: for var in range(1 , lowerCamelCase ): if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key( lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ): open_list[j].put( lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ) def snake_case ( ): '''simple docstring''' __lowercase = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list __UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __UpperCamelCase : Optional[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __UpperCamelCase : Optional[Any] = make_common_ground() __UpperCamelCase : Dict = blocks_blk # hyper parameters __UpperCamelCase : Union[str, Any] = 1 __UpperCamelCase : Union[str, Any] = 1 __UpperCamelCase : Optional[int] = 20 __UpperCamelCase : List[str] = 3 # one consistent and two other inconsistent # start and end destination __UpperCamelCase : str = (0, 0) __UpperCamelCase : str = (n - 1, n - 1) __UpperCamelCase : Optional[Any] = 1 def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = {start: 0, goal: float("""inf""" )} __lowercase = {start: -1, goal: -1} __lowercase = [] __lowercase = set() for i in range(lowerCamelCase ): open_list.append(PriorityQueue() ) open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ) __lowercase = [] __lowercase = [] while open_list[0].minkey() < float("""inf""" ): for i in range(1 , lowerCamelCase ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float("""inf""" ): do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase ) else: __lowercase , __lowercase = open_list[i].top_show() visited.add(lowerCamelCase ) expand_state( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) close_list_inad.append(lowerCamelCase ) else: if 
g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("""inf""" ): do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase ) else: __lowercase = open_list[0].top_show() visited.add(lowerCamelCase ) expand_state( lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) close_list_anchor.append(lowerCamelCase ) print("""No path found to goal""" ) print() for i in range(n - 1 , -1 , -1 ): for j in range(lowerCamelCase ): if (j, i) in blocks: print("""#""" , end=""" """ ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print("""*""" , end=""" """ ) else: print("""-""" , end=""" """ ) else: print("""*""" , end=""" """ ) if (j, i) == (n - 1, n - 1): print("""<-- End position""" , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
53
1
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs __UpperCamelCase : List[Any] = imread(r"""digital_image_processing/image_data/lena_small.jpg""") __UpperCamelCase : Any = cvtColor(img, COLOR_BGR2GRAY) def snake_case ( ): '''simple docstring''' __lowercase = cn.convert_to_negative(lowerCamelCase ) # assert negative_img array for at least one True assert negative_img.any() def snake_case ( ): '''simple docstring''' with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img: # Work around assertion for response assert str(cc.change_contrast(lowerCamelCase , 110 ) ).startswith( """<PIL.Image.Image image mode=RGB size=100x100 at""" ) def snake_case ( ): '''simple docstring''' __lowercase = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def snake_case ( ): '''simple docstring''' __lowercase = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 ) # assert ambiguous array for all == True assert canny_img.all() __lowercase = canny.canny(lowerCamelCase ) # assert canny array for at least one True assert canny_array.any() def snake_case ( ): '''simple docstring''' assert gg.gaussian_filter(lowerCamelCase , 5 , sigma=0.9 ).all() def snake_case ( ): '''simple docstring''' __lowercase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __lowercase = conv.img_convolve(lowerCamelCase , lowerCamelCase ).astype(lowerCamelCase ) assert res.any() def snake_case ( ): '''simple docstring''' assert med.median_filter(lowerCamelCase , 3 ).any() def snake_case ( ): '''simple docstring''' __lowercase , __lowercase = sob.sobel_filter(lowerCamelCase ) assert grad.any() and theta.any() def snake_case ( ): '''simple docstring''' __lowercase = sp.make_sepia(lowerCamelCase , 20 ) assert sepia.all() def snake_case ( lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' __lowercase = bs.Burkes(imread(lowerCamelCase , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def snake_case ( lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' __lowercase = rs.NearestNeighbour(imread(lowerCamelCase , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def snake_case ( ): '''simple docstring''' __lowercase = """digital_image_processing/image_data/lena.jpg""" # Reading the image and converting it to grayscale. 
__lowercase = imread(lowerCamelCase , 0 ) # Test that get_neighbors_pixel() does not return None __lowercase = 0 __lowercase = 0 __lowercase = image[x_coordinate][y_coordinate] __lowercase = lbp.get_neighbors_pixel( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array with the same height and width as the read image __lowercase = np.zeros((image.shape[0], image.shape[1]) ) # Iterate through the image and calculate the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __lowercase = lbp.local_binary_value(lowerCamelCase , lowerCamelCase , lowerCamelCase ) assert lbp_image.any()
53
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : Optional[int] = logging.get_logger(__name__) def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) __lowercase = MaskFormerConfig(backbone_config=lowerCamelCase ) __lowercase = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok __lowercase = 847 __lowercase = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok __lowercase = 150 __lowercase = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok __lowercase = 171 __lowercase = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO __lowercase = 133 __lowercase = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok __lowercase = 19 __lowercase = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok __lowercase = 65 __lowercase = """mapillary-vistas-id2label.json""" __lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) ) __lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()} return config def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', 
F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') ) rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') ) 
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') ) # cross-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') ) # MLP 1 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') ) # MLP 2 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') ) # layernorm 3 (final layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') ) rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') ) # fmt: on return rename_keys def snake_case ( lowerCamelCase , lowerCamelCase , 
lowerCamelCase ): '''simple docstring''' __lowercase = dct.pop(lowerCamelCase ) __lowercase = val def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __lowercase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' ) __lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[:dim, :] __lowercase = in_proj_bias[: dim] __lowercase = in_proj_weight[ dim : dim * 2, : ] __lowercase = in_proj_bias[ dim : dim * 2 ] __lowercase = in_proj_weight[ -dim :, : ] __lowercase = in_proj_bias[-dim :] # fmt: on def snake_case ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' ) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[: hidden_size, :] __lowercase = in_proj_bias[:config.hidden_size] __lowercase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowercase = in_proj_bias[hidden_size : hidden_size * 2] __lowercase = in_proj_weight[-hidden_size :, :] __lowercase = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' ) __lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[: hidden_size, :] __lowercase = in_proj_bias[:config.hidden_size] __lowercase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowercase = in_proj_bias[hidden_size : hidden_size * 2] __lowercase = in_proj_weight[-hidden_size :, :] __lowercase = in_proj_bias[-hidden_size :] # fmt: on def snake_case ( ): '''simple docstring''' __lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return im @torch.no_grad() def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ): '''simple docstring''' __lowercase = get_maskformer_config(lowerCamelCase ) # load original state_dict with open(lowerCamelCase , """rb""" ) as f: __lowercase = pickle.load(lowerCamelCase ) __lowercase = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys __lowercase = create_rename_keys(lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase ) read_in_swin_q_k_v(lowerCamelCase , config.backbone_config ) read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase 
) # update to torch tensors for key, value in state_dict.items(): __lowercase = torch.from_numpy(lowerCamelCase ) # load 🤗 model __lowercase = MaskFormerForInstanceSegmentation(lowerCamelCase ) model.eval() for name, param in model.named_parameters(): print(lowerCamelCase , param.shape ) __lowercase , __lowercase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}' # verify results __lowercase = prepare_img() if "vistas" in model_name: __lowercase = 65 elif "cityscapes" in model_name: __lowercase = 65_535 else: __lowercase = 255 __lowercase = True if """ade""" in model_name else False __lowercase = MaskFormerImageProcessor(ignore_index=lowerCamelCase , reduce_labels=lowerCamelCase ) __lowercase = image_processor(lowerCamelCase , return_tensors="""pt""" ) __lowercase = model(**lowerCamelCase ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": __lowercase = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'Saving model and image processor to {pytorch_dump_folder_path}' ) Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) image_processor.save_pretrained(lowerCamelCase ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(F'nielsr/{model_name}' ) image_processor.push_to_hub(F'nielsr/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCamelCase : List[Any] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
53
1
import torch

from diffusers import StableDiffusionPipeline


__UpperCamelCase : Union[str, Any] = """path-to-your-trained-model"""
__UpperCamelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")

__UpperCamelCase : List[Any] = """A photo of sks dog in a bucket"""
__UpperCamelCase : str = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("""dog-bucket.png""")
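
# Editor's note (added): for reproducible samples, diffusers pipelines also accept an
# optional `generator` argument; a hedged sketch (the seed value is illustrative):
#
#     generator = torch.Generator("cuda").manual_seed(0)
#     image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]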
53
from math import sqrt


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(lowerCamelCase ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def snake_case ( lowerCamelCase = 10_001 ):
    '''simple docstring'''
    __lowercase = 0
    __lowercase = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(lowerCamelCase ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(lowerCamelCase ):
            count += 1
    return number


if __name__ == "__main__":
    print(F'''{solution() = }''')
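
# Editor's note (added): why the trial division above may step by 6 -- every prime p > 3
# satisfies p % 6 in {1, 5}, since the remaining residues are divisible by 2 or 3.
# A standalone self-check over a few known primes:
for p in (5, 7, 11, 13, 101, 104_729):
    assert p % 6 in (1, 5)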
53
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


__UpperCamelCase : Optional[Any] = {
    """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase : Optional[Any] = [
        """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MegaForCausalLM""",
        """MegaForMaskedLM""",
        """MegaForMultipleChoice""",
        """MegaForQuestionAnswering""",
        """MegaForSequenceClassification""",
        """MegaForTokenClassification""",
        """MegaModel""",
        """MegaPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    __UpperCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
53
from __future__ import annotations

import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available

from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester


if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    if isinstance(lowerCamelCase , collections.abc.Iterable ):
        return x
    return (x, x)


@require_tf
class __UpperCamelCase :

    def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        pass

    def _a ( self : Dict ) -> Optional[int]:
        """simple docstring"""
        pass

    def _a ( self : Any ) -> Optional[Any]:
        """simple docstring"""
        pass

    def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str:
        """simple docstring"""
        __lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
        __lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
        __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )

    def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple:
        """simple docstring"""
        __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
        __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
        __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )

    def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple:
        """simple docstring"""
        __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
        __lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
        __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )

    def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]:
        """simple docstring"""
        __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
        __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
        __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        __lowercase = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_lowerCAmelCase )
            __lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )

            __lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
            __lowercase = after_output[0].numpy()
            __lowercase = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_lowerCAmelCase , 1e-5 )

    def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]:
        """simple docstring"""
        __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
        __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
        __lowercase = model(
            input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )

        __lowercase = output.vision_model_output.attentions
        self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        __lowercase = to_atuple(vision_model.config.image_size )
        __lowercase = to_atuple(vision_model.config.patch_size )
        __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        __lowercase = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )

        __lowercase = output.text_model_output.attentions
        self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )

        self.assertEqual(
            text_attentions[0].shape[-3:] ,
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,
        )

    def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]:
        """simple docstring"""
        __lowercase = np.abs((a - b) ).max()
        self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' )

    def _a ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        __lowercase = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )

    def _a ( self : int ) -> List[Any]:
        """simple docstring"""
        __lowercase = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**_lowerCAmelCase )

    def _a ( self : Union[str, Any] ) -> int:
        """simple docstring"""
        __lowercase = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )

    def _a ( self : List[str] ) -> str:
        """simple docstring"""
        __lowercase = self.prepare_config_and_inputs()
        self.check_save_load(**_lowerCAmelCase )

    def _a ( self : Dict ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**_lowerCAmelCase )

    @slow
    def _a ( self : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        __lowercase , __lowercase = self.get_pretrained_model_and_inputs()

        __lowercase = model_a(**_lowerCAmelCase )
        __lowercase = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(_lowerCAmelCase )
            __lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )

            __lowercase = model_a(**_lowerCAmelCase )
            __lowercase = after_outputs[0].numpy()
            __lowercase = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_lowerCAmelCase , 1e-5 )


@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):

    def _a ( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
        __lowercase = 13
        __lowercase = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        __lowercase = random_attention_mask([batch_size, 4] )
        __lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}

        return model, inputs

    def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]:
        """simple docstring"""
        __lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
        __lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
        return vision_model, text_model

    def _a ( self : Dict ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = TFViTModelTester(self )
        __lowercase = TFBertModelTester(self )
        __lowercase = vit_model_tester.prepare_config_and_inputs()
        __lowercase = bert_model_tester.prepare_config_and_inputs()

        __lowercase , __lowercase , __lowercase = vision_config_and_inputs

        (
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):

    def _a ( self : Tuple ) -> Any:
        """simple docstring"""
        __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
        __lowercase = 13
        __lowercase = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        __lowercase = random_attention_mask([batch_size, 4] )
        __lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}

        return model, inputs

    def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
        """simple docstring"""
        __lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
        __lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
        __lowercase = model(
            input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )

        __lowercase = output.vision_model_output.attentions
        self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        __lowercase = to_atuple(vision_model.config.image_size )
        __lowercase = to_atuple(vision_model.config.patch_size )
        __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        __lowercase = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )

        __lowercase = output.text_model_output.attentions
        self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )

        self.assertEqual(
            text_attentions[0].shape[-3:] ,
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,
        )

    def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int:
        """simple docstring"""
        __lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
        __lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
        return vision_model, text_model

    def _a ( self : Tuple ) -> str:
        """simple docstring"""
        __lowercase = TFDeiTModelTester(self )
        __lowercase = TFRobertaModelTester(self )
        __lowercase = vit_model_tester.prepare_config_and_inputs()
        __lowercase = bert_model_tester.prepare_config_and_inputs()

        __lowercase , __lowercase , __lowercase = vision_config_and_inputs

        (
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):

    def _a ( self : int ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
        __lowercase = 13
        __lowercase = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        __lowercase = random_attention_mask([batch_size, 4] )
        __lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}

        return model, inputs

    def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict:
        """simple docstring"""
        __lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
        __lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
        return vision_model, text_model

    def _a ( self : Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        __lowercase = TFCLIPVisionModelTester(self )
        __lowercase = TFBertModelTester(self )
        __lowercase = clip_model_tester.prepare_config_and_inputs()
        __lowercase = bert_model_tester.prepare_config_and_inputs()

        __lowercase , __lowercase = vision_config_and_inputs

        (
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
            (__lowercase) ,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):

    @slow
    def _a ( self : int ) -> Tuple:
        """simple docstring"""
        __lowercase = TFVisionTextDualEncoderModel.from_pretrained(
            """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
        __lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )

        __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        __lowercase = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )

        __lowercase = model(**_lowerCAmelCase )

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape ,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,
        )

        __lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
53
1
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


__UpperCamelCase : Any = logging.get_logger(__name__)


@dataclass
class __UpperCamelCase :
    __snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
    __snake_case :str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    __snake_case :int = field(
        default=1_2_8 ,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } ,
    )
    __snake_case :bool = field(
        default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )

    def _a ( self : Dict ) -> List[Any]:
        """simple docstring"""
        __lowercase = self.task_name.lower()


class __UpperCamelCase ( _lowerCAmelCase ):
    __snake_case :Optional[int] = 'train'
    __snake_case :int = 'dev'
    __snake_case :Any = 'test'


class __UpperCamelCase ( _lowerCAmelCase ):
    __snake_case :GlueDataTrainingArguments
    __snake_case :str
    __snake_case :List[InputFeatures]

    def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
        """simple docstring"""
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" ,
            _lowerCAmelCase ,
        )
        __lowercase = args
        __lowercase = glue_processors[args.task_name]()
        __lowercase = glue_output_modes[args.task_name]
        if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
            try:
                __lowercase = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        # Load data features from cache or dataset file
        __lowercase = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir ,
            F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' ,
        )
        __lowercase = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            __lowercase , __lowercase = label_list[2], label_list[1]
        __lowercase = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        __lowercase = cached_features_file + """.lock"""
        with FileLock(_lowerCAmelCase ):
            if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
                __lowercase = time.time()
                __lowercase = torch.load(_lowerCAmelCase )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )

                if mode == Split.dev:
                    __lowercase = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    __lowercase = self.processor.get_test_examples(args.data_dir )
                else:
                    __lowercase = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    __lowercase = examples[:limit_length]
                __lowercase = glue_convert_examples_to_features(
                    _lowerCAmelCase ,
                    _lowerCAmelCase ,
                    max_length=args.max_seq_length ,
                    label_list=_lowerCAmelCase ,
                    output_mode=self.output_mode ,
                )
                __lowercase = time.time()
                torch.save(self.features , _lowerCAmelCase )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )

    def __len__( self : Dict ) -> Optional[int]:
        """simple docstring"""
        return len(self.features )

    def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
        """simple docstring"""
        return self.features[i]

    def _a ( self : str ) -> int:
        """simple docstring"""
        return self.label_list
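

# Editor's note (added): the lock-then-cache pattern used in __init__ above, in miniature.
# A hedged, standalone sketch -- the paths and the `build_features` helper are hypothetical:
#
#     from filelock import FileLock
#
#     with FileLock("features.cache.lock"):            # only one process builds the cache
#         if os.path.exists("features.cache"):
#             features = torch.load("features.cache")  # fast path: reuse cached features
#         else:
#             features = build_features()               # slow path: compute once...
#             torch.save(features, "features.cache")    # ...and persist for the other ranks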
53
from __future__ import annotations

from scipy.special import comb  # type: ignore


class __UpperCamelCase :

    def __init__( self : int , _lowerCAmelCase : list[tuple[float, float]] ) -> Any:
        """simple docstring"""
        __lowercase = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        __lowercase = len(_lowerCAmelCase ) - 1

    def _a ( self : Tuple , _lowerCAmelCase : float ) -> list[float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        __lowercase = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , _lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(_lowerCAmelCase ) , 5 ) == 1
        return output_values

    def _a ( self : List[str] , _lowerCAmelCase : float ) -> tuple[float, float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        __lowercase = self.basis_function(_lowerCAmelCase )
        __lowercase = 0.0
        __lowercase = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def _a ( self : Optional[int] , _lowerCAmelCase : float = 0.01 ) -> Union[str, Any]:
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore

        __lowercase = []  # x coordinates of points to plot
        __lowercase = []  # y coordinates of points to plot

        __lowercase = 0.0
        while t <= 1:
            __lowercase = self.bezier_curve_function(_lowerCAmelCase )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size

        __lowercase = [i[0] for i in self.list_of_points]
        __lowercase = [i[1] for i in self.list_of_points]

        plt.plot(
            _lowerCAmelCase ,
            _lowerCAmelCase ,
            color="""blue""" ,
            label="""Curve of Degree """ + str(self.degree ) ,
        )
        plt.scatter(_lowerCAmelCase , _lowerCAmelCase , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
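
# Editor's note (added): a worked check of the Bernstein basis computed in basis_function
# above -- for a degree-2 curve at t = 0.5 the weights are comb(2, i) * 0.5**(2 - i) * 0.5**i,
# i.e. [0.25, 0.5, 0.25], which sum to 1 as the assertion in basis_function requires:
assert [comb(2, i) * (0.5 ** (2 - i)) * (0.5**i) for i in range(3)] == [0.25, 0.5, 0.25]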
53
1
from __future__ import annotations


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    if not nums:
        return 0
    __lowercase = nums[0]
    __lowercase = 0
    for num in nums[1:]:
        __lowercase , __lowercase = (
            max_excluding + num,
            max(lowerCamelCase , lowerCamelCase ),
        )
    return max(lowerCamelCase , lowerCamelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
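
# Editor's note (added): a worked trace of the include/exclude recurrence above on
# [1, 2, 4, 7] -- taking 7 forces skipping the adjacent 4, so the optimum is 2 + 7 = 9.
# Standalone sketch of the same update rule (the names are illustrative):
best_incl, best_excl = 0, 0
for value in (1, 2, 4, 7):
    best_incl, best_excl = best_excl + value, max(best_incl, best_excl)
assert max(best_incl, best_excl) == 9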
53
import inspect
import unittest
from typing import List

import numpy as np

from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFEfficientFormerForImageClassification,
        TFEfficientFormerForImageClassificationWithTeacher,
        TFEfficientFormerModel,
    )
    from transformers.models.efficientformer.modeling_tf_efficientformer import (
        TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_vision_available():
    from PIL import Image

    from transformers import EfficientFormerImageProcessor


class __UpperCamelCase :

    def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
        """simple docstring"""
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = image_size
        __lowercase = patch_size
        __lowercase = num_channels
        __lowercase = is_training
        __lowercase = use_labels
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = type_sequence_label_size
        __lowercase = initializer_range
        __lowercase = encoder_stride
        __lowercase = num_attention_outputs
        __lowercase = embed_dim
        __lowercase = embed_dim + 1
        __lowercase = resolution
        __lowercase = depths
        __lowercase = hidden_sizes
        __lowercase = dim
        __lowercase = mlp_expansion_ratio

    def _a ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        __lowercase = self.get_config()
        return config, pixel_values, labels

    def _a ( self : Optional[Any] ) -> str:
        """simple docstring"""
        return EfficientFormerConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            is_decoder=_lowerCAmelCase ,
            initializer_range=self.initializer_range ,
            encoder_stride=self.encoder_stride ,
            resolution=self.resolution ,
            depths=self.depths ,
            hidden_sizes=self.hidden_sizes ,
            dim=self.dim ,
            mlp_expansion_ratio=self.mlp_expansion_ratio ,
        )

    def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
        """simple docstring"""
        __lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
        __lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = self.type_sequence_label_size
        __lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
        __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        __lowercase = 1
        __lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
        __lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def _a ( self : Optional[Any] ) -> Any:
        """simple docstring"""
        __lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase = config_and_inputs
        __lowercase = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    __snake_case :Any = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    __snake_case :Any = (
        {
            'feature-extraction': TFEfficientFormerModel,
            'image-classification': (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    __snake_case :int = False
    __snake_case :Optional[int] = False
    __snake_case :int = False
    __snake_case :Any = False
    __snake_case :Any = False

    def _a ( self : Tuple ) -> Tuple:
        """simple docstring"""
        __lowercase = TFEfficientFormerModelTester(self )
        __lowercase = ConfigTester(
            self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )

    def _a ( self : Optional[int] ) -> int:
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
    def _a ( self : Optional[int] ) -> Any:
        """simple docstring"""
        pass

    @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
    def _a ( self : int ) -> str:
        """simple docstring"""
        pass

    def _a ( self : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __lowercase = model_class(_lowerCAmelCase )
            __lowercase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]

            __lowercase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _lowerCAmelCase )

    def _a ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""

        def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
            __lowercase = model_class(_lowerCAmelCase )
            __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
            __lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __lowercase = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )

            if hasattr(self.model_tester , """encoder_seq_length""" ):
                __lowercase = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
                    __lowercase = seq_length * self.model_tester.chunk_length
            else:
                __lowercase = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) ,
                [seq_length, self.model_tester.hidden_size] ,
            )

            if config.is_encoder_decoder:
                __lowercase = outputs.decoder_hidden_states

                self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
                self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
                __lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
                __lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) ,
                    [decoder_seq_length, self.model_tester.hidden_size] ,
                )

        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __lowercase = True
            check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True

            check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )

    def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
        """simple docstring"""
        __lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def _a ( self : int ) -> int:
        """simple docstring"""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCAmelCase )

    @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
    def _a ( self : Union[str, Any] ) -> str:
        """simple docstring"""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )

    def _a ( self : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )

    @slow
    def _a ( self : List[str] ) -> List[Any]:
        """simple docstring"""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )

    def _a ( self : Any ) -> List[str]:
        """simple docstring"""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase = True

        __lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
        __lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
        __lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
        __lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )

        if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
            __lowercase = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            __lowercase = True
            __lowercase = False
            __lowercase = True
            __lowercase = model_class(_lowerCAmelCase )
            __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
            __lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __lowercase = True
            __lowercase = model_class(_lowerCAmelCase )
            __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
            __lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) ,
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] ,
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) ,
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] ,
                )

    def _a ( self : Dict ) -> Optional[int]:
        """simple docstring"""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            __lowercase = model_class(_lowerCAmelCase )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            __lowercase = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            __lowercase = model(_lowerCAmelCase )
            self.assertTrue(outputs_dict is not None )


def snake_case ( ):
    '''simple docstring'''
    __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):

    @cached_property
    def _a ( self : Optional[Any] ) -> Any:
        """simple docstring"""
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )

    @slow
    def _a ( self : Optional[Any] ) -> Any:
        """simple docstring"""
        __lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
        # forward pass
        __lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
        # verify the logits
        __lowercase = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
        __lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )

    @slow
    def _a ( self : Optional[Any] ) -> Tuple:
        """simple docstring"""
        __lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
        # forward pass
        __lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
        # verify the logits
        __lowercase = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
        __lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
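

# Editor's note (added): the "maximally general inputs" trick in the Keras-compatibility test
# above, in miniature -- a symbolic tf.keras.Input leaves the batch dimension as None so the
# model is traced once for any batch size. A hedged sketch; the shape below is a toy value:
#
#     symbolic_pixels = tf.keras.Input(shape=(3, 64, 64), dtype=tf.float32, name="pixel_values")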
53
1
# Function to print upper half of diamond (pyramid)
def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    for i in range(0 , lowerCamelCase ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(""" """ , end="""""" )
        for _ in range(0 , i + 1 ):  # printing stars
            print("""* """ , end="""""" )
        print()


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    for i in range(lowerCamelCase , 0 , -1 ):
        for _ in range(lowerCamelCase , 0 , -1 ):  # printing stars
            print("""* """ , end="""""" )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(""" """ , end="""""" )


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    if n <= 0:
        print(""" ... .... nothing printing :(""" )
        return
    floyd(lowerCamelCase )  # upper half
    reverse_floyd(lowerCamelCase )  # lower half


if __name__ == "__main__":
    print(r"""| /\ | |- | |- |--| |\ /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    __UpperCamelCase : str = 1
    while K:
        __UpperCamelCase : Optional[int] = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        __UpperCamelCase : str = int(input("""press 0 to exit... and 1 to continue..."""))

    print("""Good Bye...""")
53
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

__UpperCamelCase : Tuple = 2


class __UpperCamelCase :

    def __init__(
        self : List[str] ,
        *,  # begin keyword-only arguments
        _lowerCAmelCase : Optional[int]="<s>" ,
        _lowerCAmelCase : Optional[int]="<pad>" ,
        _lowerCAmelCase : int="</s>" ,
        _lowerCAmelCase : str="<unk>" ,
        _lowerCAmelCase : List[str]=None ,
    ) -> Tuple:
        """simple docstring"""
        __lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos
        __lowercase = []
        __lowercase = []
        __lowercase = {}
        __lowercase = self.add_symbol(_lowerCAmelCase )
        __lowercase = self.add_symbol(_lowerCAmelCase )
        __lowercase = self.add_symbol(_lowerCAmelCase )
        __lowercase = self.add_symbol(_lowerCAmelCase )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(_lowerCAmelCase )
        __lowercase = len(self.symbols )

    def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any:
        """simple docstring"""
        return self.indices == other.indices

    def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict:
        """simple docstring"""
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word

    def __len__( self : str ) -> List[str]:
        """simple docstring"""
        return len(self.symbols )

    def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
        """simple docstring"""
        return sym in self.indices

    @classmethod
    def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = cls()
        d.add_from_file(_lowerCAmelCase )
        return d

    def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]:
        """simple docstring"""
        if word in self.indices and not overwrite:
            __lowercase = self.indices[word]
            __lowercase = self.count[idx] + n
            return idx
        else:
            __lowercase = len(self.symbols )
            __lowercase = idx
            self.symbols.append(_lowerCAmelCase )
            self.count.append(_lowerCAmelCase )
            return idx

    def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        return 0

    def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
        """simple docstring"""
        if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
            try:
                with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
                    self.add_from_file(_lowerCAmelCase )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
            return

        __lowercase = f.readlines()
        __lowercase = self._load_meta(_lowerCAmelCase )
        for line in lines[indices_start_line:]:
            try:
                __lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
                if field == "#fairseq:overwrite":
                    __lowercase = True
                    __lowercase , __lowercase = line.rsplit(""" """ , 1 )
                else:
                    __lowercase = False
                __lowercase = int(_lowerCAmelCase )
                __lowercase = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        """Duplicate word found when loading Dictionary: '{}'. """
                        """Duplicate words can overwrite earlier ones by adding the """
                        """#fairseq:overwrite flag at the end of the corresponding row """
                        """in the dictionary file. If using the Camembert model, please """
                        """download an updated copy of the model file.""".format(_lowerCAmelCase )
                    )
                self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
            except ValueError:
                raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    __lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() )
    __lowercase = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[F'{k}</w>']
        __lowercase = d[k]  # restore
    return da


def snake_case ( lowerCamelCase , lowerCamelCase ):
    '''simple docstring'''
    if not os.path.exists(lowerCamelCase ):
        raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
    os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
    print(F'Writing results to {pytorch_dump_folder_path}' )

    # handle various types of models

    __lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
    if not os.path.isfile(lowerCamelCase ):
        raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
    __lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )

    __lowercase = chkpt["""cfg"""]["""model"""]

    # dicts
    __lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
    if not os.path.isfile(lowerCamelCase ):
        raise ValueError(F'path to the file {dict_file} does not exist!' )
    __lowercase = Dictionary.load(lowerCamelCase )
    __lowercase = rewrite_dict_keys(src_dict.indices )
    __lowercase = len(lowerCamelCase )
    __lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
    print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
    with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )

    # merges_file (bpecodes)
    __lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
    if not os.path.isfile(lowerCamelCase ):
        raise ValueError(F'path to the file {bpecodes_file} does not exist!' )

    __lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
    shutil.copyfile(lowerCamelCase , lowerCamelCase )

    # model config
    __lowercase = os.path.join(lowerCamelCase , """config.json""" )
    __lowercase = {
        """activation_dropout""": args["""activation_dropout"""],
        """architectures""": ["""BioGptForCausalLM"""],
        """attention_probs_dropout_prob""": args["""attention_dropout"""],
        """bos_token_id""": 0,
        """eos_token_id""": 2,
        """hidden_act""": args["""activation_fn"""],
        """hidden_dropout_prob""": args["""dropout"""],
        """hidden_size""": args["""decoder_embed_dim"""],
        """initializer_range""": 0.02,
        """intermediate_size""": args["""decoder_ffn_embed_dim"""],
        """layer_norm_eps""": 1e-12,
        """layerdrop""": args["""decoder_layerdrop"""],
        """max_position_embeddings""": args["""max_target_positions"""],
        """model_type""": """biogpt""",
        """num_attention_heads""": args["""decoder_attention_heads"""],
        """num_hidden_layers""": args["""decoder_layers"""],
        """pad_token_id""": 1,
        """scale_embedding""": not args["""no_scale_embedding"""],
        """tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
        """vocab_size""": src_vocab_size,
    }

    # good hparam defaults to start with
    print(F'Generating {biogpt_model_config_file}' )
    with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )

    # tokenizer config
    __lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
    __lowercase = {
        """bos_token""": """<s>""",
        """eos_token""": """</s>""",
        """model_max_length""": 1_024,
        """pad_token""": """<pad>""",
        """special_tokens_map_file""": None,
        """tokenizer_class""": """BioGptTokenizer""",
        """unk_token""": """<unk>""",
    }

    print(F'Generating {biogpt_tokenizer_config_file}' )
    with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )

    # model
    __lowercase = chkpt["""model"""]

    # remove unneeded keys
    __lowercase = [
        """decoder.version""",
    ]
    for k in ignore_keys:
        model_state_dict.pop(lowerCamelCase , lowerCamelCase )

    __lowercase = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith("""output_projection.weight""" ):
            __lowercase = model_state_dict.pop(lowerCamelCase )
        else:
            __lowercase = model_state_dict.pop(lowerCamelCase )

    __lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
    __lowercase = BioGptForCausalLM(lowerCamelCase )

    # check that it loads ok
    model_new.load_state_dict(lowerCamelCase )

    # save
    __lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
    print(F'Generating {pytorch_weights_dump_path}' )
    torch.save(lowerCamelCase , lowerCamelCase )

    print("""Conversion is done!""" )


if __name__ == "__main__":
    __UpperCamelCase : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--biogpt_checkpoint_path""",
        default=None,
        type=str,
        required=True,
        help=(
            """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
            """ bpecodes, etc."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    __UpperCamelCase : Optional[Any] = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
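

# Editor's note (added): what rewrite_dict_keys does, shown on a toy vocabulary -- BPE
# continuation markers ("@@") are stripped while word-final tokens gain a "</w>" suffix.
# A standalone sketch; the toy tokens below are illustrative only:
_toy = {"hel@@": 5, "lo": 3}
_rewritten = dict(
    (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in _toy.items()
)
assert _rewritten == {"hel": 5, "lo</w>": 3}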
53
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


__UpperCamelCase : Optional[int] = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase : Union[str, Any] = ["""MLukeTokenizer"""]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    __UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
53
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ): __snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str: """simple docstring""" __lowercase = np.random.RandomState(_lowerCAmelCase ) __lowercase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self : int ) -> List[Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Tuple ) -> int: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Tuple ) 
-> Union[str, Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**_lowerCAmelCase ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs["""prompt"""]] # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs.pop("""prompt""" )] __lowercase = pipe.tokenizer( _lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , ) __lowercase = text_inputs["""input_ids"""] __lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] __lowercase = prompt_embeds # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def _a ( self : int ) -> str: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = self.get_dummy_inputs() __lowercase = 3 * ["""this is a negative prompt"""] __lowercase = negative_prompt __lowercase = 3 * [inputs["""prompt"""]] # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] __lowercase = self.get_dummy_inputs() __lowercase = 3 * [inputs.pop("""prompt""" )] __lowercase = [] for p in [prompt, negative_prompt]: __lowercase = pipe.tokenizer( _lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , ) __lowercase = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) __lowercase , __lowercase = embeds # forward __lowercase = pipe(**_lowerCAmelCase ) __lowercase = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): @property def _a ( self : Dict ) 
-> str: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __lowercase = ort.SessionOptions() __lowercase = False return options def _a ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """A painting of a squirrel eating a burger""" np.random.seed(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Tuple ) -> Any: """simple docstring""" __lowercase = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """open neural network exchange""" __lowercase = np.random.RandomState(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Dict ) -> Dict: """simple docstring""" __lowercase = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """open neural network exchange""" __lowercase = np.random.RandomState(0 ) __lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : str ) -> List[str]: """simple docstring""" __lowercase = 0 def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None: __lowercase = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) __lowercase = latents[0, -3:, -3:, -1] 
__lowercase = np.array( [-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) __lowercase = latents[0, -3:, -3:, -1] __lowercase = np.array( [-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 __lowercase = False __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) __lowercase = """Andromeda galaxy in a bottle""" __lowercase = np.random.RandomState(0 ) pipe( prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert pipe.safety_checker is None __lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_lowerCAmelCase ) __lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase ) # sanity check that the pipeline still works assert pipe.safety_checker is None __lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None
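# --- Usage sketch (added for illustration; not part of the original test file) ---
# A minimal CPU inference run mirroring the calls exercised by the tests above.
# The model id, revision and step count are example values taken from those
# tests, not requirements of the pipeline; the ONNX weights are downloaded on
# first use.
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
)
# Returns PIL images by default; pass output_type="np" for arrays as in the tests.
image = pipe("A painting of a squirrel eating a burger", num_inference_steps=10).images[0]
image.save("squirrel.png")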
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 135: count the values of n below the limit for which
    x**2 - y**2 - z**2 == n has exactly ten solutions with x, y, z positive
    integers in arithmetic progression."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
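# --- Cross-check sketch (added for illustration) ---
# The sieve above indexes solutions by n = x*x - y*y - z*z with x = z + 2*d and
# y = z + d (a decreasing arithmetic progression), which factors as
# n = (3*d - z) * (d + z). A direct brute force over (z, d) confirms small
# cases; the expected count of 10 for n = 1155 comes from the Project Euler 135
# problem statement.
def brute_force_count(n: int, search_limit: int = 1_200) -> int:
    count = 0
    for z in range(1, search_limit):
        for d in range(1, search_limit):
            x, y = z + 2 * d, z + d
            if x * x - y * y - z * z == n:
                count += 1
    return count


if __name__ == "__main__":
    assert brute_force_count(1155) == 10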
def remove_duplicates(key: str) -> str:
    """Remove duplicate alphabetic characters from a keyword (spaces kept)."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Return a map from the plain alphabet to a keyword-shuffled alphabet."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher a message; characters outside the map pass through unchanged."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher a message with the inverse of the cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
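# --- Round-trip sketch (added for illustration; uses the restored names above) ---
# The keyword "computer" is an arbitrary example value.
cipher_map = create_cipher_map("computer")
encoded = encipher("Hello World", cipher_map)
assert decipher(encoded, cipher_map) == "HELLO WORLD"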
encode_dict = {
    "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA",
    "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA",
    "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB",
    "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA",
    "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA",
    "z": "BABBB", " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a message with the Baconian cipher (letters and spaces only)."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-ciphered message built from 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
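# --- Round-trip sketch (added for illustration) ---
# Each letter becomes a five-character A/B group; spaces separate words.
assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
assert decode(encode("hello world")) == "hello world"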
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __UpperCamelCase ( _lowerCAmelCase ): def _a ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = 5 # Realm tok __lowercase = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """test""", """question""", """this""", """is""", """the""", """first""", """second""", """third""", """fourth""", """fifth""", """record""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] __lowercase = os.path.join(self.tmpdirname , """realm_tokenizer""" ) os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase ) __lowercase = os.path.join(_lowerCAmelCase , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) __lowercase = os.path.join(self.tmpdirname , """realm_block_records""" ) os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase ) def _a ( self : Dict ) -> RealmTokenizer: """simple docstring""" return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) ) def _a ( self : int ) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _a ( self : Tuple ) -> List[str]: """simple docstring""" __lowercase = RealmConfig(num_block_records=self.num_block_records ) return config def _a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase = Dataset.from_dict( { """id""": ["""0""", """1"""], """question""": ["""foo""", """bar"""], """answers""": [["""Foo""", """Bar"""], ["""Bar"""]], } ) return dataset def _a ( self : Optional[int] ) -> str: """simple docstring""" __lowercase = np.array( [ B"""This is the first record""", B"""This is the second record""", B"""This is the third record""", B"""This is the fourth record""", B"""This is the fifth record""", B"""This is a longer longer longer record""", ] , dtype=_lowerCAmelCase , ) return block_records def _a ( self : Tuple ) -> str: """simple docstring""" __lowercase = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.get_config() __lowercase = self.get_dummy_retriever() __lowercase = retriever.tokenizer __lowercase = np.array([0, 3] , dtype="""long""" ) __lowercase = tokenizer(["""Test question"""] ).input_ids __lowercase = tokenizer( ["""the fourth"""] , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ).input_ids __lowercase = config.reader_seq_len __lowercase , __lowercase , __lowercase , __lowercase = retriever( _lowerCAmelCase , _lowerCAmelCase , answer_ids=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors="""np""" ) self.assertEqual(len(_lowerCAmelCase ) , 2 ) self.assertEqual(len(_lowerCAmelCase ) , 2 ) self.assertEqual(len(_lowerCAmelCase ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) 
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , ) def _a ( self : Optional[Any] ) -> int: """simple docstring""" __lowercase = self.get_config() __lowercase = self.get_dummy_retriever() __lowercase = retriever.tokenizer __lowercase = np.array([0, 3, 5] , dtype="""long""" ) __lowercase = tokenizer(["""Test question"""] ).input_ids __lowercase = tokenizer( ["""the fourth""", """longer longer"""] , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ).input_ids __lowercase = config.reader_seq_len __lowercase , __lowercase , __lowercase , __lowercase = retriever( _lowerCAmelCase , _lowerCAmelCase , answer_ids=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors="""np""" ) self.assertEqual([False, True, True] , _lowerCAmelCase ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _lowerCAmelCase ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _lowerCAmelCase ) def _a ( self : int ) -> Optional[int]: """simple docstring""" __lowercase = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) # Test local path __lowercase = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) self.assertEqual(retriever.block_records[0] , B"""This is the first record""" ) # Test mocked remote path with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download: __lowercase = os.path.join( os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME ) __lowercase = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" ) self.assertEqual(retriever.block_records[0] , B"""This is the first record""" )
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :str = (UnCLIPScheduler,) def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple: """simple docstring""" __lowercase = { """num_train_timesteps""": 1000, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**_lowerCAmelCase ) return config def _a ( self : Dict ) -> List[Any]: """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_lowerCAmelCase ) def _a ( self : Any ) -> Any: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_lowerCAmelCase ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _a ( self : str ) -> int: """simple docstring""" for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" ) __lowercase = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5 def _a ( self : str ) -> Optional[int]: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config(variance_type="""learned_range""" ) __lowercase = scheduler_class(**_lowerCAmelCase ) __lowercase = 0.5 assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5 def _a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**_lowerCAmelCase ) __lowercase = scheduler.timesteps __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter __lowercase = torch.manual_seed(0 ) for i, t in enumerate(_lowerCAmelCase ): # 1. predict noise residual __lowercase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. 
predict previous mean of sample x_t-1 __lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample __lowercase = pred_prev_sample __lowercase = torch.sum(torch.abs(_lowerCAmelCase ) ) __lowercase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3 def _a ( self : List[str] ) -> str: """simple docstring""" __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(25 ) __lowercase = scheduler.timesteps __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter __lowercase = torch.manual_seed(0 ) for i, t in enumerate(_lowerCAmelCase ): # 1. predict noise residual __lowercase = model(_lowerCAmelCase , _lowerCAmelCase ) if i + 1 == timesteps.shape[0]: __lowercase = None else: __lowercase = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 __lowercase = scheduler.step( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample __lowercase = pred_prev_sample __lowercase = torch.sum(torch.abs(_lowerCAmelCase ) ) __lowercase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3 def _a ( self : str ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : int ) -> List[str]: """simple docstring""" pass
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert a speed between km/h, m/s, mph and knot, rounded to 3 places."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
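# --- Example conversions (added for illustration) ---
# Every value is first normalized to km/h, then scaled to the target unit.
assert convert_speed(100, "km/h", "m/s") == 27.778
assert convert_speed(100, "km/h", "mph") == 62.137
assert convert_speed(100, "mph", "km/h") == 160.934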
import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures __UpperCamelCase : Any = logging.get_logger(__name__) @dataclass class __UpperCamelCase : __snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} ) __snake_case :str = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) __snake_case :int = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def _a ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = self.task_name.lower() class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :Optional[int] = 'train' __snake_case :int = 'dev' __snake_case :Any = 'test' class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :GlueDataTrainingArguments __snake_case :str __snake_case :List[InputFeatures] def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]: """simple docstring""" warnings.warn( """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """ """library. You can have a look at this example script for pointers: """ """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , ) __lowercase = args __lowercase = glue_processors[args.task_name]() __lowercase = glue_output_modes[args.task_name] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): try: __lowercase = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) # Load data features from cache or dataset file __lowercase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) __lowercase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) __lowercase , __lowercase = label_list[2], label_list[1] __lowercase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
__lowercase = cached_features_file + """.lock""" with FileLock(_lowerCAmelCase ): if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache: __lowercase = time.time() __lowercase = torch.load(_lowerCAmelCase ) logger.info( F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(F'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: __lowercase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: __lowercase = self.processor.get_test_examples(args.data_dir ) else: __lowercase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: __lowercase = examples[:limit_length] __lowercase = glue_convert_examples_to_features( _lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , ) __lowercase = time.time() torch.save(self.features , _lowerCAmelCase ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self : Dict ) -> Optional[int]: """simple docstring""" return len(self.features ) def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures: """simple docstring""" return self.features[i] def _a ( self : str ) -> int: """simple docstring""" return self.label_list
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class __UpperCamelCase : def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = is_training __lowercase = use_labels __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = encoder_stride __lowercase = num_attention_outputs __lowercase = embed_dim __lowercase = embed_dim + 1 __lowercase = resolution __lowercase = depths __lowercase = hidden_sizes __lowercase = dim __lowercase = mlp_expansion_ratio def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = self.get_config() return config, pixel_values, labels def _a ( self : Optional[Any] ) -> str: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def _a ( self 
: Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = TFEfficientFormerModel(config=_lowerCAmelCase ) __lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" __lowercase = self.type_sequence_label_size __lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase ) __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase = 1 __lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase ) __lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _a ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): __snake_case :Any = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) __snake_case :Any = ( { 'feature-extraction': TFEfficientFormerModel, 'image-classification': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) __snake_case :int = False __snake_case :Optional[int] = False __snake_case :int = False __snake_case :Any = False __snake_case :Any = False def _a ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase = TFEfficientFormerModelTester(self ) __lowercase = ConfigTester( self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 ) def _a ( self : Optional[int] ) -> int: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" ) def _a ( self : Optional[int] ) -> Any: """simple docstring""" pass @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" ) def _a ( self : int ) -> str: """simple docstring""" pass def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(_lowerCAmelCase ) __lowercase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def _a ( self : Union[str, Any] ) -> Dict: """simple docstring""" def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ): __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = 
outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowercase = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) if hasattr(self.model_tester , """encoder_seq_length""" ): __lowercase = self.model_tester.encoder_seq_length if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1: __lowercase = seq_length * self.model_tester.chunk_length else: __lowercase = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __lowercase = outputs.decoder_hidden_states self.assertIsInstance(_lowerCAmelCase , (list, tuple) ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also works using config del inputs_dict["output_hidden_states"] __lowercase = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict: """simple docstring""" __lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _a ( self : int ) -> int: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" ) def _a ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase ) def _a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def _a ( self : List[str] ) -> List[Any]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def _a ( self : Any ) -> List[str]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase ) __lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase ) if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ): __lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes: __lowercase = True __lowercase = False __lowercase = True __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowercase = True __lowercase = model_class(_lowerCAmelCase ) __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase ) __lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def _a ( self : Dict ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __lowercase = model_class(_lowerCAmelCase ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __lowercase = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase ) for key, val in model.input_signature.items() if key in model.dummy_inputs } __lowercase = model(_lowerCAmelCase ) self.assertTrue(outputs_dict is not None ) def snake_case ( ): '''simple docstring''' __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def _a ( self : Optional[Any] ) -> Any: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" ) if is_vision_available() else None ) @slow def _a ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" ) # forward pass __lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase ) # verify the logits __lowercase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) __lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) ) @slow def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" __lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( """snap-research/efficientformer-l1-300""" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" ) # forward pass __lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase ) # verify the logits __lowercase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) __lowercase = 
tf.constant([-0.1_312, 0.4_353, -1.0_499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCamelCase : List[Any] = logging.getLogger(__name__) __UpperCamelCase : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) __UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={ 'help': ( 'The model checkpoint for weights initialization. Leave None if you want to train a model from' ' scratch.' ) } , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __UpperCamelCase : __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={ 'help': ( 'The input training data files (multiple files in glob format). ' 'Very often splitting large files to smaller files can prevent tokenizer going out of memory' ) } , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , ) __snake_case :Optional[str] = field( default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} ) __snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether ot not to use whole word mask.'} ) __snake_case :float = field( default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) __snake_case :float = field( default=1 / 6 , metadata={ 'help': ( 'Ratio of length of a span of masked tokens to surrounding context length for permutation language' ' modeling.' 
) } , ) __snake_case :int = field( default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} ) __snake_case :int = field( default=-1 , metadata={ 'help': ( 'Optional input sequence length after tokenization.' 'The training dataset will be truncated in block of this size for training.' 'Default to the model max input length for single sentence inputs (take into account special tokens).' ) } , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ): '''simple docstring''' def _dataset(lowerCamelCase , lowerCamelCase=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" ) return LineByLineWithRefDataset( tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , ) return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size ) else: return TextDataset( tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def snake_case ( ): '''simple docstring''' __lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( """Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """ """or remove the --do_eval argument.""" ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if model_args.config_name: __lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: __lowercase = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.tokenizer_name: __lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another""" """ script, save it,and load it from here, using --tokenizer_name""" ) if model_args.model_name_or_path: __lowercase = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , ) else: logger.info("""Training new model from scratch""" ) __lowercase = AutoModelWithLMHead.from_config(lowerCamelCase ) model.resize_token_embeddings(len(lowerCamelCase ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( """BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the""" """--mlm flag (masked language modeling).""" ) if data_args.block_size <= 0: __lowercase = tokenizer.max_len # Our input block size will be the max possible for the model else: __lowercase = min(data_args.block_size , tokenizer.max_len ) # Get datasets __lowercase = ( get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) __lowercase = ( get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": __lowercase = DataCollatorForPermutationLanguageModeling( tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: __lowercase = DataCollatorForWholeWordMask( tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability ) else: __lowercase = DataCollatorForLanguageModeling( tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer __lowercase = Trainer( model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , ) # Training if training_args.do_train: __lowercase = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=lowerCamelCase ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowercase = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __lowercase = trainer.evaluate() __lowercase = math.exp(eval_output["""eval_loss"""] ) __lowercase = {"""perplexity""": perplexity} __lowercase = 
os.path.join(training_args.output_dir , """eval_results_lm.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) results.update(lowerCamelCase ) return results def snake_case ( lowerCamelCase ): '''simple docstring''' main() if __name__ == "__main__": main()
def check_bouncy(n: int) -> bool:
    """Return True if n is bouncy, i.e. its digits are neither entirely
    non-decreasing nor entirely non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers
    reaches the given percentage (Project Euler problem 112)."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
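# --- Quick checks (added for illustration) ---
assert check_bouncy(101) is True       # digits fall then rise
assert check_bouncy(134468) is False   # non-decreasing digits
assert check_bouncy(66420) is False    # non-increasing digits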
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the given side lengths can form a polygon, i.e. the
    longest side is strictly shorter than the sum of all the other sides."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
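# --- Quick checks (added for illustration) ---
assert check_polygon([6, 10, 5]) is True      # 10 < 6 + 5
assert check_polygon([3, 7, 13, 2]) is False  # 13 >= 3 + 7 + 2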
def find_min(arr: list[int]) -> int:
    """Partition arr into two subsets whose sums are as close as possible and
    return the minimum possible difference of the two subset sums."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j.
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True
    for j in range(1, s + 1):
        dp[0][j] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Carry over "sum j reachable without element i"; the original
            # carried dp[i][j - 1], which wrongly marks every sum reachable.
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = 0
    for j in range(s // 2, -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
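# --- Quick check (added for illustration) ---
# {1, 5, 6} vs {11} gives subset sums 12 and 11, so the best difference is 1.
assert find_min([1, 6, 11, 5]) == 1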
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of nums with no two elements adjacent."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
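# --- Quick checks (added for illustration) ---
assert maximum_non_adjacent_sum([1, 2, 3]) == 4               # picks 1 and 3
assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18  # picks 5, 7, 6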