"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
A__ = tf.convert_to_tensor(lowercase_ )
A__ = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
A__ = tf.convert_to_tensor(lowercase_ )
A__ = tf.cast(math.pi , x.dtype )
A__ = tf.cast(0.0_4_4_7_1_5 , x.dtype )
A__ = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowercase_ , 3 )) ))
return x * cdf
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
A__ = tf.convert_to_tensor(lowercase_ )
return x * tf.tanh(tf.math.softplus(lowercase_ ) )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
A__ = tf.convert_to_tensor(lowercase_ )
A__ = tf.cast(0.0_4_4_7_1_5 , x.dtype )
A__ = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
A__ = tf.convert_to_tensor(lowercase_ )
A__ = tf.cast(1.7_0_2 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
return tf.clip_by_value(_gelu(lowercase_ ) , -10 , 10 )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=-1 ) -> Union[str, Any]:
A__, A__ = tf.split(lowercase_ , 2 , axis=lowercase_ )
return a * tf.math.sigmoid(lowercase_ )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
return tf.keras.activations.gelu(lowercase_ , approximate=lowercase_ )
SCREAMING_SNAKE_CASE = tf.keras.activations.gelu
SCREAMING_SNAKE_CASE = approximate_gelu_wrap
else:
SCREAMING_SNAKE_CASE = _gelu
SCREAMING_SNAKE_CASE = _gelu_new
SCREAMING_SNAKE_CASE = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[int]:
if length <= 0 or not isinstance(lowercase_ , lowercase_ ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(lowercase_ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
from __future__ import annotations

from collections import deque


class Automaton:
    """Aho-Corasick string-matching automaton over a list of keywords."""

    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
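# Short usage sketch for the automaton above (keywords and the test string are
# illustrative; `search_in` maps each keyword to the start indices of its hits):
if __name__ == "__main__":
    automaton = Automaton(["what", "hat", "ver", "er"])
    print(automaton.search_in("whatever, err ... , wherever"))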
from ..utils import DummyObject, requires_backends


class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
def twos_complement(number: int) -> str:
    """
    Take in a negative integer `number` and return its two's complement
    representation as a binary string.
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
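# Worked examples for the function above (values follow directly from the
# definition):
if __name__ == "__main__":
    assert twos_complement(-1) == "0b11"
    assert twos_complement(-5) == "0b1011"
    assert twos_complement(-17) == "0b101111"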
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :torch.FloatTensor
class lowercase__ ( snake_case__, snake_case__ ):
@register_to_config
def __init__( self : Optional[int] , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : Tuple[str] = ("DownEncoderBlock2D",) , snake_case__ : Tuple[str] = ("UpDecoderBlock2D",) , snake_case__ : Tuple[int] = (64,) , snake_case__ : int = 1 , snake_case__ : str = "silu" , snake_case__ : int = 3 , snake_case__ : int = 32 , snake_case__ : int = 256 , snake_case__ : int = 32 , snake_case__ : Optional[int] = None , snake_case__ : float = 0.18_215 , snake_case__ : str = "group" , ):
super().__init__()
# pass init params to Encoder
lowerCamelCase_ : List[str] =Encoder(
in_channels=snake_case__ , out_channels=snake_case__ , down_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , act_fn=snake_case__ , norm_num_groups=snake_case__ , double_z=snake_case__ , )
lowerCamelCase_ : Union[str, Any] =vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCamelCase_ : List[Any] =nn.Convad(snake_case__ , snake_case__ , 1 )
lowerCamelCase_ : int =VectorQuantizer(snake_case__ , snake_case__ , beta=0.25 , remap=snake_case__ , sane_index_shape=snake_case__ )
lowerCamelCase_ : int =nn.Convad(snake_case__ , snake_case__ , 1 )
# pass init params to Decoder
lowerCamelCase_ : Union[str, Any] =Decoder(
in_channels=snake_case__ , out_channels=snake_case__ , up_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , act_fn=snake_case__ , norm_num_groups=snake_case__ , norm_type=snake_case__ , )
@apply_forward_hook
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : torch.FloatTensor , snake_case__ : bool = True ):
lowerCamelCase_ : int =self.encoder(snake_case__ )
lowerCamelCase_ : Union[str, Any] =self.quant_conv(snake_case__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=snake_case__ )
@apply_forward_hook
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : torch.FloatTensor , snake_case__ : bool = False , snake_case__ : bool = True ):
# also go through quantization layer
if not force_not_quantize:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict =self.quantize(snake_case__ )
else:
lowerCamelCase_ : List[Any] =h
lowerCamelCase_ : List[Any] =self.post_quant_conv(snake_case__ )
lowerCamelCase_ : Dict =self.decoder(snake_case__ , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__ )
def UpperCAmelCase__ ( self : Any , snake_case__ : torch.FloatTensor , snake_case__ : bool = True ):
lowerCamelCase_ : Dict =sample
lowerCamelCase_ : Optional[Any] =self.encode(snake_case__ ).latents
lowerCamelCase_ : str =self.decode(snake_case__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__ )
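# Minimal usage sketch for VQModel (a sketch: this module uses relative
# imports, so it only runs inside the diffusers package; shapes are
# illustrative):
#
#   model = VQModel()
#   sample = torch.randn(1, 3, 32, 32)
#   reconstruction = model(sample).sample  # same spatial shape as the input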
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class __a (UpperCamelCase_):
'''simple docstring'''
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Dict = 8
# DPR tok
SCREAMING_SNAKE_CASE__ : int = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
SCREAMING_SNAKE_CASE__ : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
SCREAMING_SNAKE_CASE__ : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE__ : int = dict(zip(_a , range(len(_a ) ) ) )
SCREAMING_SNAKE_CASE__ : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
SCREAMING_SNAKE_CASE__ : List[Any] = {"""unk_token""": """<unk>"""}
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
SCREAMING_SNAKE_CASE__ : int = os.path.join(_a , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ : Any = os.path.join(_a , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def _a ( self ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def _a ( self ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def _a ( self ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = os.path.join(self.tmpdirname , """rag_tokenizer""" )
SCREAMING_SNAKE_CASE__ : Any = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_a )
rag_tokenizer.save_pretrained(_a )
SCREAMING_SNAKE_CASE__ : Any = RagTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
SCREAMING_SNAKE_CASE__ : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_a )
self.assertIsNotNone(_a )
@slow
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
SCREAMING_SNAKE_CASE__ : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer(_a )
self.assertIsNotNone(_a )
"""simple docstring"""
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
while b:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = b, a % b
return a
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
return a if b == 0 else euclidean_gcd_recursive(__lowerCAmelCase , a % b )
def _lowercase ( ) -> Union[str, Any]:
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case )
class UpperCAmelCase_ ( snake_case ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
UpperCamelCase =field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
UpperCamelCase =Features({"question": Value("string" ), "context": Value("string" )} )
UpperCamelCase =Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
UpperCamelCase ="question"
UpperCamelCase ="context"
UpperCamelCase ="answers"
@property
def _lowerCamelCase ( self ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
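# Usage sketch (a sketch: this module uses relative imports; the renamed
# columns are illustrative):
#
#   template = QuestionAnsweringExtractive(question_column="q", context_column="ctx")
#   template.column_mapping  # {"q": "question", "ctx": "context", "answers": "answers"}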
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any:
'''simple docstring'''
super().tearDown()
gc.collect()
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int:
'''simple docstring'''
A__ , A__ = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''' , from_pt=UpperCAmelCase__ , dtype=jnp.bfloataa)
A__ , A__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=UpperCAmelCase__ , from_pt=UpperCAmelCase__ , dtype=jnp.bfloataa)
A__ = controlnet_params
A__ = '''bird'''
A__ = jax.device_count()
A__ = pipe.prepare_text_inputs([prompts] * num_samples)
A__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''')
A__ = pipe.prepare_image_inputs([canny_image] * num_samples)
A__ = jax.random.PRNGKey(0)
A__ = jax.random.split(UpperCAmelCase__ , jax.device_count())
A__ = replicate(UpperCAmelCase__)
A__ = shard(UpperCAmelCase__)
A__ = shard(UpperCAmelCase__)
A__ = pipe(
prompt_ids=UpperCAmelCase__ , image=UpperCAmelCase__ , params=UpperCAmelCase__ , prng_seed=UpperCAmelCase__ , num_inference_steps=50 , jit=UpperCAmelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
A__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
A__ = images[0, 253:256, 253:256, -1]
A__ = jnp.asarray(jax.device_get(image_slice.flatten()))
A__ = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
'''simple docstring'''
A__ , A__ = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''' , from_pt=UpperCAmelCase__ , dtype=jnp.bfloataa)
A__ , A__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=UpperCAmelCase__ , from_pt=UpperCAmelCase__ , dtype=jnp.bfloataa)
A__ = controlnet_params
A__ = '''Chef in the kitchen'''
A__ = jax.device_count()
A__ = pipe.prepare_text_inputs([prompts] * num_samples)
A__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''')
A__ = pipe.prepare_image_inputs([pose_image] * num_samples)
A__ = jax.random.PRNGKey(0)
A__ = jax.random.split(UpperCAmelCase__ , jax.device_count())
A__ = replicate(UpperCAmelCase__)
A__ = shard(UpperCAmelCase__)
A__ = shard(UpperCAmelCase__)
A__ = pipe(
prompt_ids=UpperCAmelCase__ , image=UpperCAmelCase__ , params=UpperCAmelCase__ , prng_seed=UpperCAmelCase__ , num_inference_steps=50 , jit=UpperCAmelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
A__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
A__ = images[0, 253:256, 253:256, -1]
A__ = jnp.asarray(jax.device_get(image_slice.flatten()))
A__ = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for an input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
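# Usage sketch: any object with a `process(sample: float) -> float` method
# satisfies the FilterType protocol. `IdentityFilter` below is a made-up
# stand-in; a real IIR filter would shape the plotted response.
if __name__ == "__main__":

    class IdentityFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(IdentityFilter(), 48000)
    show_phase_response(IdentityFilter(), 48000)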
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the per-example sequence lengths for the train and val splits."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
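# Example invocation (a sketch; the tokenizer name and data directory are
# illustrative, and `utils.py` must be importable from the working directory):
#
#   python save_len_file.py --tokenizer_name facebook/bart-base --data_dir ./cnn_dm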
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = """Hello, World!"""
_a = """en_XX"""
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = Path("data_bin" )
UpperCAmelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(__lowerCamelCase ).parent ), checkpoint_file=Path(__lowerCamelCase ).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(__lowerCamelCase ), bpe="sentencepiece", sentencepiece_model=str(Path(__lowerCamelCase ).parent / "sentencepiece.bpe.model" ), src_dict=str(data_dir / "dict.txt" ), )
xmod.eval() # disable dropout
print(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = xmod.model.encoder.sentence_encoder
UpperCAmelCase_ : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2 ), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
if classification_head:
UpperCAmelCase_ : int = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:", __lowerCamelCase )
UpperCAmelCase_ : List[str] = XmodForSequenceClassification(__lowerCamelCase ) if classification_head else XmodForMaskedLM(__lowerCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase_ : List[str] = xmod_sent_encoder.embed_tokens.weight
UpperCAmelCase_ : str = xmod_sent_encoder.embed_positions.weight
UpperCAmelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
UpperCAmelCase_ : List[Any] = xmod_sent_encoder.layernorm_embedding.weight
UpperCAmelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase_ : List[str] = model.roberta.encoder.layer[i]
UpperCAmelCase_ : Any = xmod_sent_encoder.layers[i]
# self attention
UpperCAmelCase_ : str = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
UpperCAmelCase_ : Union[str, Any] = xmod_layer.self_attn.q_proj.weight
UpperCAmelCase_ : str = xmod_layer.self_attn.q_proj.bias
UpperCAmelCase_ : Any = xmod_layer.self_attn.k_proj.weight
UpperCAmelCase_ : Dict = xmod_layer.self_attn.k_proj.bias
UpperCAmelCase_ : Tuple = xmod_layer.self_attn.v_proj.weight
UpperCAmelCase_ : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase_ : str = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
UpperCAmelCase_ : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
UpperCAmelCase_ : Optional[Any] = xmod_layer.self_attn.out_proj.bias
UpperCAmelCase_ : Dict = xmod_layer.self_attn_layer_norm.weight
UpperCAmelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCAmelCase_ : List[str] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
UpperCAmelCase_ : str = xmod_layer.fca.weight
UpperCAmelCase_ : Tuple = xmod_layer.fca.bias
# output
UpperCAmelCase_ : List[Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
UpperCAmelCase_ : Optional[Any] = xmod_layer.fca.weight
UpperCAmelCase_ : List[Any] = xmod_layer.fca.bias
UpperCAmelCase_ : Any = xmod_layer.final_layer_norm.weight
UpperCAmelCase_ : List[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCAmelCase_ : Union[str, Any] = xmod_layer.adapter_layer_norm.weight
UpperCAmelCase_ : List[str] = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
UpperCAmelCase_ : Union[str, Any] = bert_output.adapter_modules[lang_code]
UpperCAmelCase_ : Tuple = xmod_layer.adapter_modules[lang_code]
UpperCAmelCase_ : Optional[Any] = from_adapter.fca.weight
UpperCAmelCase_ : Any = from_adapter.fca.bias
UpperCAmelCase_ : List[Any] = from_adapter.fca.weight
UpperCAmelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCAmelCase_ : int = xmod_sent_encoder.layer_norm.weight
UpperCAmelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCAmelCase_ : Optional[int] = xmod.model.classification_heads["mnli"].dense.weight
UpperCAmelCase_ : Any = xmod.model.classification_heads["mnli"].dense.bias
UpperCAmelCase_ : Any = xmod.model.classification_heads["mnli"].out_proj.weight
UpperCAmelCase_ : Optional[int] = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
UpperCAmelCase_ : int = xmod.model.encoder.lm_head.dense.weight
UpperCAmelCase_ : str = xmod.model.encoder.lm_head.dense.bias
UpperCAmelCase_ : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase_ : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
UpperCAmelCase_ : Optional[Any] = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase_ : List[str] = xmod.encode(__lowerCamelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__lowerCamelCase )
UpperCAmelCase_ : List[str] = model(__lowerCamelCase )[0]
if classification_head:
UpperCAmelCase_ : Optional[Any] = xmod.model.classification_heads["mnli"](xmod.extract_features(__lowerCamelCase ) )
else:
UpperCAmelCase_ : Dict = xmod.model(__lowerCamelCase, lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape, their_output.shape )
UpperCAmelCase_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
UpperCAmelCase_ : List[Any] = torch.allclose(__lowerCamelCase, __lowerCamelCase, atol=1E-3 )
print("Do both models output the same tensors?", "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(__lowerCamelCase ).mkdir(parents=__lowerCamelCase, exist_ok=__lowerCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_a = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
def __a ( ):
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCAmelCase_ : Dict = parser.parse_args()
return args.f
class A_ (lowercase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowercase_ , "argv" , lowercase_ ):
UpperCAmelCase_ : List[str] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowercase_ , 0.6_66 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
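# Usage sketch (a sketch: this module uses relative imports, and loading the
# pretrained files requires Hub access):
#
#   tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   tokenizer("a photo of a cat")["input_ids"]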
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
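# Typical invocation of this launcher (the training-script name below is
# illustrative, not taken from this file): one process is spawned per TPU core
# and --tpu_num_cores is forwarded to the wrapped script via sys.argv.
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...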
| 369 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
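# Self-contained cross-check of the Mobius values above (the function name
# `mobius` was reconstructed from context; this reference needs no `maths`
# package): mu(n) = 0 when a squared prime divides n, else
# (-1)**(number of distinct prime factors).
def _mobius_reference(n: int) -> int:
    k, d = 0, 2
    while d * d <= n:
        if n % d == 0:
            n //= d
            if n % d == 0:  # d**2 divides the input -> not square-free
                return 0
            k += 1
        else:
            d += 1
    if n > 1:
        k += 1
    return -1 if k % 2 else 1

assert [_mobius_reference(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]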
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_pix2struct"] = [
    "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
    "Pix2StructPreTrainedModel",
    "Pix2StructForConditionalGeneration",
    "Pix2StructVisionModel",
    "Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pix2struct import (
    PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    Pix2StructConfig,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pix2struct import (
    PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
    Pix2StructForConditionalGeneration,
    Pix2StructPreTrainedModel,
    Pix2StructTextModel,
    Pix2StructVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
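# The module above uses transformers' lazy-import pattern: nothing heavy is
# imported until an attribute is first accessed. A minimal standalone sketch
# of the same idea (an illustration, not the real _LazyModule implementation):
import importlib
import types

class LazyNamespace(types.ModuleType):
    def __init__(self, name: str, structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._origin = {sym: mod for mod, syms in structure.items() for sym in syms}

    def __getattr__(self, sym: str):
        module = importlib.import_module(f"{self.__name__}.{self._origin[sym]}")
        return getattr(module, sym)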
| 187 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
SCREAMING_SNAKE_CASE__ : int = None
def __magic_name__ ( ) -> str:
__lowerCamelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__lowerCAmelCase , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__lowerCAmelCase , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __magic_name__ ( __lowerCAmelCase : Dict ) -> Optional[Any]:
def remove_articles(__lowerCAmelCase : Optional[int] ):
return ARTICLES_REGEX.sub(''' ''' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : Union[str, Any] ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[int]:
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> int:
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str:
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
__lowerCamelCase = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> Optional[Any]:
__lowerCamelCase = {}
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = qa['''id''']
__lowerCamelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowerCamelCase = ['''''']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
__lowerCamelCase = preds[qid]
# Take max over all gold answers
__lowerCamelCase = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
__lowerCamelCase = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
__lowerCamelCase = {}
for qid, s in scores.items():
__lowerCamelCase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCamelCase = float(not qid_to_has_ans[qid] )
else:
__lowerCamelCase = s
return new_scores
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None ) -> Union[str, Any]:
if not qid_list:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> int:
for k in new_eval:
__lowerCamelCase = new_eval[k]
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
plt.step(__lowerCAmelCase , __lowerCAmelCase , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(__lowerCAmelCase , __lowerCAmelCase , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCAmelCase )
plt.savefig(__lowerCAmelCase )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None ) -> int:
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
__lowerCamelCase = 0.0
__lowerCamelCase = 1.0
__lowerCamelCase = 0.0
__lowerCamelCase = [1.0]
__lowerCamelCase = [0.0]
__lowerCamelCase = 0.0
for i, qid in enumerate(__lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCamelCase = true_pos / float(i + 1 )
__lowerCamelCase = true_pos / float(__lowerCAmelCase )
if i == len(__lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCAmelCase )
recalls.append(__lowerCAmelCase )
if out_image:
plot_pr_curve(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ) -> List[Any]:
if out_image_dir and not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
__lowerCamelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__lowerCamelCase = {k: float(__lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_exact''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_f1''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_oracle''' )
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ) -> Optional[Any]:
if not qid_list:
return
__lowerCamelCase = [na_probs[k] for k in qid_list]
__lowerCamelCase = np.ones_like(__lowerCAmelCase ) / float(len(__lowerCAmelCase ) )
plt.hist(__lowerCAmelCase , weights=__lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCAmelCase , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
__lowerCamelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCamelCase = num_no_ans
__lowerCamelCase = cur_score
__lowerCamelCase = 0.0
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
for i, qid in enumerate(__lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCamelCase = scores[qid]
else:
if preds[qid]:
__lowerCamelCase = -1
else:
__lowerCamelCase = 0
cur_score += diff
if cur_score > best_score:
__lowerCamelCase = cur_score
__lowerCamelCase = na_probs[qid]
return 100.0 * best_score / len(__lowerCAmelCase ), best_thresh
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> int:
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = best_exact
__lowerCamelCase = exact_thresh
__lowerCamelCase = best_fa
__lowerCamelCase = fa_thresh
def __magic_name__ ( ) -> Optional[int]:
with open(OPTS.data_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
__lowerCamelCase = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
else:
__lowerCamelCase = {k: 0.0 for k in preds}
__lowerCamelCase = make_qid_to_has_ans(__lowerCAmelCase ) # maps qid to True/False
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if v]
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if not v]
__lowerCamelCase , __lowerCamelCase = get_raw_scores(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase )
if has_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''HasAns''' )
if no_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
else:
print(json.dumps(__lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
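# The token-level F1 used above is 2PR/(P+R) over the multiset overlap of
# normalized tokens. Worked standalone example mirroring the F1 helper's
# logic; "The"/"a" vanish under the article-stripping normalization:
import collections

def _f1(gold_toks, pred_toks):
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

assert _f1(["cat", "sat"], ["cat", "stood"]) == 0.5  # "The cat sat" vs "a cat stood"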
| 270 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase = {
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
UpperCamelCase = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = ["input_ids", "attention_mask"]
snake_case = NllbTokenizer
snake_case = []
snake_case = []
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , )->List[str]:
'''simple docstring'''
A_ : Any = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
A_ : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , legacy_behaviour=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
A_ : Tuple = vocab_file
A_ : List[Any] = False if not self.vocab_file else True
A_ : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
A_ : Optional[int] = {
lang_code: self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A_ : Optional[Any] = src_lang if src_lang is not None else '''eng_Latn'''
A_ : Any = self.convert_tokens_to_ids(self._src_lang )
A_ : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _snake_case ( self )->str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
A_ : int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    if token_ids_1 is None:
        return self.prefix_tokens + token_ids_0 + self.suffix_tokens
    # We don't expect to process pairs, but leave the pair logic for API consistency
    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
A_ : Dict = src_lang
A_ : str = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
A_ : Tuple = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
A_ : Any = tgt_lang_id
return inputs
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "eng_Latn" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "fra_Latn" , **_SCREAMING_SNAKE_CASE , )->BatchEncoding:
'''simple docstring'''
A_ : Optional[Any] = src_lang
A_ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->List[str]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
A_ : str = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
if self.legacy_behaviour:
A_ : Union[str, Any] = []
A_ : Any = [self.eos_token_id, self.cur_lang_code]
else:
A_ : str = [self.cur_lang_code]
A_ : List[Any] = [self.eos_token_id]
A_ : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
A_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
A_ : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
A_ : Optional[int] = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
if self.legacy_behaviour:
A_ : List[Any] = []
A_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
else:
A_ : List[str] = [self.cur_lang_code]
A_ : str = [self.eos_token_id]
A_ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
A_ : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
A_ : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )->Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
A_ : str = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
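# Sketch of the language-code placement the two set_*_lang_special_tokens
# methods above arrange (the ids below are illustrative placeholders):
# legacy behaviour puts the code after </s>, current behaviour puts it first.
eos_id, lang_id = 2, 256047  # hypothetical </s> and eng_Latn ids
legacy_prefix, legacy_suffix = [], [eos_id, lang_id]
current_prefix, current_suffix = [lang_id], [eos_id]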
| 65 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
UpperCamelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
UpperCamelCase = """▁"""
# Segments (not really needed)
UpperCamelCase = 0
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 3
UpperCamelCase = 4
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = "left"
snake_case = XLNetTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<sep>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<cls>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=["<eop>", "<eod>"] , **_SCREAMING_SNAKE_CASE , )->Dict:
'''simple docstring'''
A_ : Tuple = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
A_ : Optional[Any] = 3
A_ : List[Any] = do_lower_case
A_ : Optional[Any] = remove_space
A_ : Tuple = keep_accents
A_ : str = vocab_file
A_ : List[str] = False if not self.vocab_file else True
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return token_ids_0 + sep + cls
    return token_ids_0 + sep + token_ids_1 + sep + cls
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
    sep = [self.sep_token_id]
    cls_segment_id = [2]
    if token_ids_1 is None:
        return len(token_ids_0 + sep) * [0] + cls_segment_id
    return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )->Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
A_ : Union[str, Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
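# Unlike BERT-style tokenizers, XLNet appends its special tokens at the end:
# single -> A [SEP] [CLS], pair -> A [SEP] B [SEP] [CLS], with segment id 2
# reserved for [CLS]. Sketch with illustrative ids:
seq_a, seq_b, sep_tok, cls_segment = [7, 8], [4], [5], [2]
assert len(seq_a + sep_tok) * [0] + len(seq_b + sep_tok) * [1] + cls_segment == [0, 0, 0, 1, 1, 2]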
| 65 | 1 |
import unittest
from transformers import DonutProcessor
SCREAMING_SNAKE_CASE__ : List[str] = "naver-clova-ix/donut-base"
class lowerCAmelCase__ ( unittest.TestCase ):
def setUp(self) -> None:
    self.processor = DonutProcessor.from_pretrained(SCREAMING_SNAKE_CASE__)
def test_token2json(self) -> None:
    expected_json = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
    sequence = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
    actual_json = self.processor.token2json(sequence)
    self.assertDictEqual(actual_json, expected_json)
| 270 |
def count_set_bits(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
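# Cross-check of count_set_bits (name reconstructed; the original identifier
# was obfuscated) against Python's built-in binary representation:
for n in (0, 1, 0b1011, 255, 2**31 - 1):
    assert count_set_bits(n) == bin(n).count("1")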
| 156 | 0 |
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
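# Standard-normal sanity checks for gaussian() above: the peak value at the
# mean is 1/sqrt(2*pi) and the curve is symmetric about mu.
assert abs(gaussian(0.0) - 0.3989422804014327) < 1e-12
assert abs(gaussian(1.0) - gaussian(-1.0)) < 1e-12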
| 204 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__lowerCamelCase : str = logging.get_logger(__name__)
class __snake_case ( lowerCamelCase_ ):
def __init__( self : List[str] , *_lowercase : Union[str, Any] , **_lowercase : Union[str, Any] ):
"""simple docstring"""
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 204 | 1 |
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
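# Bisection halves the bracket every step, so the 1e-7 tolerance above is met
# after roughly log2((b - a) / 1e-7) iterations. Quick check on sqrt(2):
assert abs(bisection(lambda x: x * x - 2, 1, 2) - 2**0.5) < 1e-6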
| 60 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCAmelCase__ : List[Any] = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
UpperCAmelCase__ : List[Any] = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
UpperCAmelCase__ : Dict = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
def _lowercase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 0.0
for i, j in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
n_correct += 1.0 if math_equivalence.is_equiv(UpperCAmelCase__ , UpperCAmelCase__ ) else 0.0
SCREAMING_SNAKE_CASE : Optional[Any] = n_correct / len(UpperCAmelCase__ )
return {
"accuracy": accuracy,
}
| 245 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
A_ : Any = LEDConfig
A_ : Optional[int] = {}
A_ : Union[str, Any] = 'gelu'
def __init__(self : Union[str, Any] , a__ : Union[str, Any] , a__ : Optional[int]=13 , a__ : Dict=7 , a__ : List[str]=True , a__ : List[str]=False , a__ : Union[str, Any]=99 , a__ : int=32 , a__ : Dict=2 , a__ : int=4 , a__ : Optional[int]=37 , a__ : List[Any]=0.1 , a__ : Union[str, Any]=0.1 , a__ : Tuple=20 , a__ : Optional[Any]=2 , a__ : Union[str, Any]=1 , a__ : Optional[Any]=0 , a__ : List[Any]=4 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = eos_token_id
__snake_case = pad_token_id
__snake_case = bos_token_id
__snake_case = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__snake_case = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__snake_case = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def a (self : Dict ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
__snake_case = prepare_led_inputs_dict(a__ , a__ , a__ )
__snake_case = tf.concat(
[tf.zeros_like(a__ )[:, :-1], tf.ones_like(a__ )[:, -1:]] , axis=-1 , )
__snake_case = global_attention_mask
return config, inputs_dict
def a (self : Any , a__ : Dict , a__ : Optional[Any] ):
"""simple docstring"""
__snake_case = TFLEDModel(config=a__ ).get_decoder()
__snake_case = inputs_dict['''input_ids''']
__snake_case = input_ids[:1, :]
__snake_case = inputs_dict['''attention_mask'''][:1, :]
__snake_case = 1
# first forward pass
__snake_case = model(a__ , attention_mask=a__ , use_cache=a__ )
__snake_case , __snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
__snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__snake_case = model(a__ , attention_mask=a__ )[0]
__snake_case = model(a__ , attention_mask=a__ , past_key_values=a__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__snake_case = output_from_no_past[:, -3:, random_slice_idx]
__snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a__ , a__ , rtol=1E-3 )
def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : Optional[int]=None , snake_case_ : str=None , snake_case_ : Optional[Any]=None , snake_case_ : Optional[int]=None , ) -> List[str]:
if attention_mask is None:
__snake_case = tf.cast(tf.math.not_equal(snake_case_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__snake_case = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Tuple = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
A_ : int = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
A_ : List[Any] = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
A_ : Optional[Any] = True
A_ : List[Any] = False
A_ : Optional[Any] = False
A_ : str = False
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = TFLEDModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ )
def a (self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : Any ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a__ )
def a (self : Any ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = tf.zeros_like(inputs_dict['''attention_mask'''] )
__snake_case = 2
__snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
__snake_case = True
__snake_case = self.model_tester.seq_length
__snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(a__ : Dict ):
__snake_case = outputs.decoder_attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(a__ : List[Any] ):
__snake_case = [t.numpy() for t in outputs.encoder_attentions]
__snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__snake_case = True
__snake_case = False
__snake_case = False
__snake_case = model_class(a__ )
__snake_case = model(self._prepare_for_class(a__ , a__ ) )
__snake_case = len(a__ )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
if self.is_encoder_decoder:
__snake_case = model_class(a__ )
__snake_case = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_decoder_attentions_output(a__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__snake_case = True
__snake_case = model_class(a__ )
__snake_case = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
# Check attention is always last and order is fine
__snake_case = True
__snake_case = True
__snake_case = model_class(a__ )
__snake_case = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a__ ) )
self.assertEqual(model.config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def a (self : List[str] ):
"""simple docstring"""
pass
def a (self : str ):
"""simple docstring"""
pass
def lowerCamelCase__ ( snake_case_ : List[Any] ) -> Optional[Any]:
return tf.constant(snake_case_ , dtype=tf.intaa )
snake_case_ = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : int ):
"""simple docstring"""
__snake_case = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
__snake_case = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
__snake_case = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
__snake_case = prepare_led_inputs_dict(model.config , a__ , a__ )
__snake_case = model(**a__ )[0]
__snake_case = (1, 1024, 768)
self.assertEqual(output.shape , a__ )
# change to expected output here
__snake_case = tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1E-3 )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
__snake_case = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
__snake_case = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
__snake_case = prepare_led_inputs_dict(model.config , a__ , a__ )
__snake_case = model(**a__ )[0]
__snake_case = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , a__ )
# change to expected output here
__snake_case = tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1E-3 , rtol=1E-3 )
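# The tests above mark the first tokens "global" for LED's sparse attention.
# Standalone sketch of that mask construction (sizes are illustrative):
import tensorflow as tf

seq_len, num_global = 8, 2
global_mask = tf.cast(tf.range(seq_len)[None, :] < num_global, tf.int32)
# global_mask -> [[1, 1, 0, 0, 0, 0, 0, 0]]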
| 238 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
snake_case_ = data_utils.TransfoXLTokenizer
snake_case_ = data_utils.TransfoXLCorpus
snake_case_ = data_utils
snake_case_ = data_utils
def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : int ) -> Dict:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case_ , '''rb''' ) as fp:
__snake_case = pickle.load(snake_case_ , encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__snake_case = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
__snake_case = corpus.vocab.__dict__
torch.save(snake_case_ , snake_case_ )
__snake_case = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , snake_case_ )
__snake_case = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(snake_case_ , snake_case_ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__snake_case = os.path.abspath(snake_case_ )
__snake_case = os.path.abspath(snake_case_ )
print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__snake_case = TransfoXLConfig()
else:
__snake_case = TransfoXLConfig.from_json_file(snake_case_ )
print(f"""Building PyTorch model from configuration: {config}""" )
__snake_case = TransfoXLLMHeadModel(snake_case_ )
__snake_case = load_tf_weights_in_transfo_xl(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
__snake_case = os.path.join(snake_case_ , snake_case_ )
__snake_case = os.path.join(snake_case_ , snake_case_ )
print(f"""Save PyTorch model to {os.path.abspath(snake_case_ )}""" )
torch.save(model.state_dict() , snake_case_ )
print(f"""Save configuration file to {os.path.abspath(snake_case_ )}""" )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
snake_case_ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
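# Example invocation of the converter above (the paths are illustrative only):
#   python convert_transfo_xl_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#       --transfo_xl_config_file ./config.json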
| 238 | 1 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a :Union[str, Any] = "sshleifer/bart-tiny-random"
a :Any = "patrickvonplaten/t5-tiny-random"
@require_torch
class __a (unittest.TestCase):
'''simple docstring'''
@cached_property
def _a ( self ) -> Tuple:
"""simple docstring"""
return AutoConfig.from_pretrained(_a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : List[Any] = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Tuple = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=_a )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Optional[Any] = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=_a )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : str = create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _a ( self ) -> Dict:
"""simple docstring"""
with self.assertRaises(_a ):
create_student_by_copying_alternating_layers(_a , tempfile.mkdtemp() , e=_a , d=_a )
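# The helper under test copies a subset of teacher layers into a smaller
# student. The even-spacing rule below is a simplified stand-in for the
# library's actual layer-selection table, shown only to illustrate the idea:
def pick_layers_to_copy(n_student: int, n_teacher: int) -> list:
    step = n_teacher / n_student
    return [round(i * step) for i in range(n_student)]

assert pick_layers_to_copy(1, 12) == [0]
assert pick_layers_to_copy(3, 12) == [0, 4, 8]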
| 132 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :int = ["MaskFormerFeatureExtractor"]
a :Dict = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_maskformer"] = [
    "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
    "MaskFormerForInstanceSegmentation",
    "MaskFormerModel",
    "MaskFormerPreTrainedModel",
]
_import_structure["modeling_maskformer_swin"] = [
    "MaskFormerSwinBackbone",
    "MaskFormerSwinModel",
    "MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 132 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class a ( unittest.TestCase ):
"""simple docstring"""
def setUp(self) -> None:
    mod_file = inspect.getfile(accelerate.test_utils)
    self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
    self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])
@require_tpu
def test_tpu(self) -> None:
    distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
    """.split()
    cmd = [sys.executable] + distributed_args
    execute_subprocess_async(cmd, env=os.environ.copy())
| 81 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(max_number: int = 10**8) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
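# Worked trace of the two-pointer count for max_number = 30 (primes below 15
# are [2, 3, 5, 7, 11, 13]): p=2 pairs with every q up to 13 -> 6 products;
# p=3 shrinks the right pointer to 7 -> 3 products; p=5 pairs only with
# itself -> 1. Total 10, matching the semiprimes below 30:
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.
assert solution(30) == 10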
| 81 | 1 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase ):
    '''simple docstring'''
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
        outputs = unmasker("""My name is <mask>""" )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""},
] , )
snake_case_ :List[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-05,
"""token""": 38_015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-05,
"""token""": 25_506,
"""token_str""": """ accuser""",
},
] , )
        outputs = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
        outputs = unmasker("""My name is <mask>""" )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
        outputs = unmasker("""The largest city in France is <mask>""" )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
        outputs = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
] , )
snake_case_ :List[Any] = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(snake_case , decimals=6 ) , [
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
        # convert model to fp16
        pipe.model.half()
        outputs = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
        self.assertIsInstance(outputs , list )
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
        self.run_large_test(unmasker )
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
        self.run_large_test(unmasker )
    def run_large_test(self, unmasker):
        outputs = unmasker("""My name is <mask>""" )
        self.assertEqual(
            nested_simplify(outputs ) , [
{"""sequence""": """My name is John""", """score""": 0.0_0_8, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.0_0_7, """token""": 1_573, """token_str""": """ Chris"""},
] , )
snake_case_ :List[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(snake_case ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.2_5_1,
"""token""": 2_201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.2_1_4,
"""token""": 12_790,
"""token_str""": """ Lyon""",
},
] , )
        outputs = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
        self.assertEqual(
            nested_simplify(outputs ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.0_0_5, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.0_0_0, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.0_0_0, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f"""This is another {tokenizer.mask_token} test""",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f"""This is a {tokenizer.mask_token}""" , )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ] , )
        outputs = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ] , )
        outputs = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
        self.assertEqual(
            outputs , [
                [
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                ],
                [
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                ],
            ] , )
        with self.assertRaises(ValueError ):
            fill_masker([None] )
        # No mask_token is not supported
        with self.assertRaises(PipelineException ):
            fill_masker("""This is""" )
        self.run_test_top_k(model , tokenizer )
        self.run_test_targets(model , tokenizer )
        self.run_test_top_k_targets(model , tokenizer )
        self.fill_mask_with_duplicate_targets_and_top_k(model , tokenizer )
        self.fill_mask_with_multiple_masks(model , tokenizer )
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys() )[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , targets=targets )
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["""token"""] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["""token_str"""] for el in outputs} , set(processed_targets ) )
        # Call argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=targets )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["""token"""] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["""token_str"""] for el in outputs} , set(processed_targets ) )
        # Score equivalence
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=targets )
        tokens = [top_mask["""token_str"""] for top_mask in outputs]
        scores = [top_mask["""score"""] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets ) == set(tokens ):
            unmasked_targets = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=tokens )
            target_scores = [top_mask["""score"""] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores ) , nested_simplify(target_scores ) )
        # Raises with invalid
        with self.assertRaises(ValueError ):
            outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError ):
                outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""""""] )
            with self.assertRaises(ValueError ):
                outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="""""" )
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2 )
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ] , )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs2 = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
        self.assertEqual(
            outputs2 , [
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
            ] , )
        self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys() )[:3]
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=targets )
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["""token_str"""] for el in sorted(outputs , key=lambda x: x["score"] , reverse=True )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2 ).issubset(targets ):
            outputs2 = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=targets2 )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys() )[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=targets , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs ) , 3 )
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(
            f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                ],
                [
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                ],
                [
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                    {"""sequence""": ANY(str), """score""": ANY(float), """token""": ANY(int), """token_str""": ANY(str)},
                ],
            ] , )
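if __name__ == "__main__":
    # Hedged usage sketch, not part of the test suite: how a fill-mask pipeline
    # is typically driven (the model id here is an illustrative choice).
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")
    for result in unmasker("Paris is the <mask> of France.", top_k=2):
        # each result dict carries "sequence", "score", "token" and "token_str"
        print(result["token_str"], round(result["score"], 3))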
| 66 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path , map_location='cpu' )
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
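# Example invocation (script name and paths are placeholders):
#   python convert_m2m100_checkpoint.py /path/to/model.pt /path/to/pytorch_dump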
| 50 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class WavaVecaFeatureExtractionTester(unittest.TestCase ):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
class WavaVecaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = WavaVecaFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance(self, input_values):
        self.assertTrue(np.all(np.mean(input_values , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_values , axis=0 ) - 1 ) < 1E-3 ) )
    def test_call(self):
lowerCAmelCase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase_ : str = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase_ : Optional[int] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase_ : int = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase_ : List[str] = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
# Test batched
lowerCAmelCase_ : str = feat_extract(_a , return_tensors='np' ).input_values
lowerCAmelCase_ : Union[str, Any] = feat_extract(_a , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase_ : List[str] = np.asarray(_a )
lowerCAmelCase_ : int = feat_extract(_a , return_tensors='np' ).input_values
lowerCAmelCase_ : Tuple = feat_extract(_a , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
    def test_zero_mean_unit_variance_normalization_np(self):
lowerCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : List[str] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase_ : Optional[int] = ["""longest""", """max_length""", """do_not_pad"""]
lowerCAmelCase_ : Tuple = [None, 1_6_0_0, None]
for max_length, padding in zip(_a , _a ):
lowerCAmelCase_ : List[Any] = feat_extract(_a , padding=_a , max_length=_a , return_tensors='np' )
lowerCAmelCase_ : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def test_zero_mean_unit_variance_normalization(self):
lowerCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : Any = range(8_0_0 , 1_4_0_0 , 2_0_0 )
lowerCAmelCase_ : List[str] = [floats_list((1, x) )[0] for x in lengths]
lowerCAmelCase_ : Optional[int] = ["""longest""", """max_length""", """do_not_pad"""]
lowerCAmelCase_ : Union[str, Any] = [None, 1_6_0_0, None]
for max_length, padding in zip(_a , _a ):
lowerCAmelCase_ : Tuple = feat_extract(_a , max_length=_a , padding=_a )
lowerCAmelCase_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
lowerCAmelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : int = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase_ : Optional[int] = feat_extract(
_a , truncation=_a , max_length=1_0_0_0 , padding='max_length' , return_tensors='np' )
lowerCAmelCase_ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
lowerCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : List[str] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase_ : int = feat_extract(
_a , truncation=_a , max_length=1_0_0_0 , padding='longest' , return_tensors='np' )
lowerCAmelCase_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
lowerCAmelCase_ : int = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase_ : Optional[int] = feat_extract(
_a , truncation=_a , max_length=2_0_0_0 , padding='longest' , return_tensors='np' )
lowerCAmelCase_ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
    def test_double_precision_pad(self):
import torch
lowerCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : List[Any] = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase_ : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase_ : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase_ : Dict = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowerCAmelCase_ : Tuple = WavaVecaConfig.from_pretrained(_a )
lowerCAmelCase_ : int = WavaVecaFeatureExtractor.from_pretrained(_a )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer' )
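if __name__ == "__main__":
    # Hedged usage sketch, not part of the tests: padding two raw mono waveforms
    # of different lengths (the class name follows the import at the top of this file).
    raw_audio = [np.zeros(800, dtype=np.float32), np.zeros(1200, dtype=np.float32)]
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    batch = feature_extractor(raw_audio, sampling_rate=16_000, padding="longest", return_tensors="np")
    print(batch.input_values.shape)  # (2, 1200): the shorter input is padded to the longest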
| 362 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None ) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:" , eval_metric )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
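    # Under the hood, `find_executable_batch_size` reruns `inner_training_loop`
    # after every CUDA out-of-memory error, halving `batch_size` each time until
    # a run fits, which is why the dataloaders and model are (re)built inside it.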
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 289 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
snake_case_ : Optional[int] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
    config_args = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=__A , default=__A , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=__A , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=__A , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=__A , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher(args):
    """simple docstring"""
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
            args.command = [f.read().splitlines()]
# To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [F"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(F"""Running {' '.join(cmd )}""" )
        return
    subprocess.run(cmd )
    print('''Successfully setup pod.''' )
def main():
    """simple docstring"""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
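# Example invocation (values are illustrative, flags match the parser above):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate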
| 51 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
snake_case_ : Tuple = logging.get_logger(__name__)
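# The decorator factory below wraps each benchmarked call so it runs either
# eagerly or as an XLA-compiled tf.function, depending on the benchmark arguments.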
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """simple docstring"""
    def run_func(func):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class TensorFlowBenchmark(Benchmark ):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version(self):
        """simple docstring"""
        return tf.__version__
    def _inference_speed(self , model_name : str , batch_size : int , sequence_length : int):
        """simple docstring"""
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length)
        return self._measure_speed(_inference)
    def _train_speed(self , model_name : str , batch_size : int , sequence_length : int):
        """simple docstring"""
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        _train = self._prepare_train_func(model_name , batch_size , sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(self , model_name : str , batch_size : int , sequence_length : int):
        """simple docstring"""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length)
        return self._measure_memory(_inference)
    def _train_memory(self , model_name : str , batch_size : int , sequence_length : int):
        """simple docstring"""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        _train = self._prepare_train_func(model_name , batch_size , sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self , model_name : str , batch_size : int , sequence_length : int):
        """simple docstring"""
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError('''Mixed precision is currently not supported.''')
        has_model_class_in_config = (
            hasattr(config , '''architectures''')
            and isinstance(config.architectures , list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = '''TF''' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('''transformers''' , fromlist=[model_class])
                model_cls = getattr(transformers_module , model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , '''vocab_size''') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size)
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids , decoder_input_ids=input_ids , training=False)
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_forward():
            return model(input_ids , training=False)
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self , model_name : str , batch_size : int , sequence_length : int):
        """simple docstring"""
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''')
        if self.args.fp16:
            raise NotImplementedError('''Mixed precision is currently not supported.''')
        has_model_class_in_config = (
            hasattr(config , '''architectures''')
            and isinstance(config.architectures , list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = '''TF''' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('''transformers''' , fromlist=[model_class])
                model_cls = getattr(transformers_module , model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , '''vocab_size''') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size)
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids , decoder_input_ids=input_ids , labels=input_ids , training=True)[0]
            gradients = tf.gradients(loss , model.trainable_variables)
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_train():
            loss = model(input_ids , labels=input_ids , training=True)[0]
            gradients = tf.gradients(loss , model.trainable_variables)
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self , func):
        """simple docstring"""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''')
                    timeit.repeat(func , repeat=1 , number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func , repeat=self.args.repeat , number=10 , )
                return min(runtimes) / 1_0.0
            except ResourceExhaustedError as e:
                self.print_fn(F"""Doesn't fit on GPU. {e}""")
    def _measure_memory(self , func : Callable[[], None]):
"""simple docstring"""
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''')
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''')
                    trace = start_memory_tracing('''transformers''')
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''')
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''')
                        memory = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''')
# init nvml
nvml.nvmlInit()
func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''')
                        memory = None
else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes , int) else memory_bytes
if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""")
return "N/A", None
| 51 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class QuestionAnsweringExtractive(TaskTemplate ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'question': Value('string' ), 'context': Value('string' )} )
    label_schema: ClassVar[Features] = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
lowercase : str ="question"
lowercase : str ="context"
lowercase : str ="answers"
@property
    def column_mapping(self ) -> Dict[str, str]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
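# Sketch of intended use: datasets carrying this template in their `DatasetInfo`
# can be cast to the schema above via
# `dataset.prepare_for_task("question-answering-extractive")`.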
| 356 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a_ : List[str] = """src/diffusers"""
# Matches is_xxx_available()
_re_backend = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
_re_single_line_import = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
a_ : Optional[Any] = """
{0} = None
"""
a_ : List[Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
a_ : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('''else:''' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(__snake_case )
elif name.islower():
return DUMMY_FUNCTION.format(__snake_case , __snake_case )
else:
return DUMMY_CLASS.format(__snake_case , __snake_case )
def a_ ( __snake_case : Tuple=None ) -> List[str]:
"""simple docstring"""
if backend_specific_objects is None:
lowerCamelCase_ =read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowerCamelCase_ ={}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] )
lowerCamelCase_ =dummy_file
return dummy_files
def a_ ( __snake_case : Dict=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowerCamelCase_ ={'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
lowerCamelCase_ =os.path.join(__snake_case , '''utils''' )
lowerCamelCase_ ={
backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' )
for backend in dummy_files.keys()
}
lowerCamelCase_ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.read()
else:
lowerCamelCase_ =''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main '''
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` '''
'''to fix this.''' )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a_ : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
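# A quick, self-contained sketch of what `find_backend` extracts from an import
# guard (regex copied from above; the example line is illustrative):
import re as _re_demo

_re_backend_demo = _re_demo.compile(R"is\_([a-z_]*)_available\(\)")
_guard = "if is_torch_available() and is_transformers_available():"
assert "_and_".join(_re_backend_demo.findall(_guard)) == "torch_and_transformers"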
| 6 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCAmelCase__ = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = '''cpu'''
lowerCAmelCase__ = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
lowerCAmelCase__ = '''path-to-your-trained-model'''
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCAmelCase__ = pipe.to(device)
# to channels last
lowerCAmelCase__ = pipe.unet.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.vae.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCAmelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCAmelCase__ = torch.randn(2, 4, 6_4, 6_4)
lowerCAmelCase__ = torch.rand(1) * 9_9_9
lowerCAmelCase__ = torch.randn(2, 7_7, 7_6_8)
lowerCAmelCase__ = (sample, timestep, encoder_hidden_status)
try:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowerCAmelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCAmelCase__ = 6_6_6
lowerCAmelCase__ = torch.Generator(device).manual_seed(seed)
lowerCAmelCase__ = {'''generator''': generator}
if args.steps is not None:
lowerCAmelCase__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowerCAmelCase__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 130 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class snake_case__(_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 768 , ):
super().__init__()
lowercase__ : List[str] = nn.Parameter(torch.zeros(1 , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[int] = nn.Parameter(torch.ones(1 , SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Union[str, torch.device]] = None , SCREAMING_SNAKE_CASE : Optional[torch.dtype] = None , ):
lowercase__ : Union[str, Any] = nn.Parameter(self.mean.to(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = nn.Parameter(self.std.to(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) )
return self
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : Optional[int] = (embeds - self.mean) * 1.0 / self.std
return embeds
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = (embeds * self.std) + self.mean
return embeds
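# A minimal sketch of the round-trip implemented by the two methods above
# (the `scale`/`unscale` names are my reconstruction; plain tensors stand in
# for the registered mean/std parameters):
import torch as _torch_demo

_mean = _torch_demo.zeros(1, 768)
_std = _torch_demo.ones(1, 768)
_embeds = _torch_demo.randn(2, 768)
_scaled = (_embeds - _mean) * 1.0 / _std  # forward: scale
_restored = (_scaled * _std) + _mean      # inverse: unscale
assert _torch_demo.allclose(_embeds, _restored)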
| 130 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ):
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 289 |
"""simple docstring"""
import baseaa
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> bytes:
"""simple docstring"""
return baseaa.aaaencode(string.encode('utf-8' ) )
def UpperCamelCase_ ( lowerCAmelCase__ : bytes ) -> str:
"""simple docstring"""
return baseaa.aaadecode(lowerCAmelCase__ ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
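# Assuming the obfuscated `baseaa.aaaencode`/`baseaa.aaadecode` stand for
# `base64.a85encode`/`base64.a85decode` (Ascii85), the round-trip looks like:
import base64 as _base64_demo

_payload = _base64_demo.a85encode("Hello World!".encode("utf-8"))
assert _base64_demo.a85decode(_payload).decode("utf-8") == "Hello World!"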
| 289 | 1 |
'''simple docstring'''
from math import factorial
def UpperCamelCase__ ( lowerCAmelCase = 20 ):
"""simple docstring"""
_lowerCAmelCase = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
_lowerCAmelCase = n // 2
return int(factorial(lowerCAmelCase ) / (factorial(lowerCAmelCase ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
A__ : str =int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
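# The closed form above is the central binomial coefficient C(2n, n); a quick
# cross-check against the standard library:
from math import comb as _comb_demo

assert _comb_demo(4, 2) == 6                  # n = 2: six monotone lattice paths in a 2x2 grid
assert _comb_demo(40, 20) == 137_846_528_820  # n = 20, i.e. solution(20)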
| 70 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def UpperCamelCase__ ( ):
"""simple docstring"""
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--src_path""" , type=lowerCAmelCase , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , )
parser.add_argument(
"""--evaluation_set""" , type=lowerCAmelCase , help="""where to store parsed evaluation_set file""" , )
parser.add_argument(
"""--gold_data_path""" , type=lowerCAmelCase , help="""where to store parsed gold_data_path file""" , )
_lowerCAmelCase = parser.parse_args()
with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open(
args.gold_data_path , """w""" ) as gold_file:
_lowerCAmelCase = json.load(lowerCAmelCase )
for dpr_record in tqdm(lowerCAmelCase ):
_lowerCAmelCase = dpr_record["""question"""]
_lowerCAmelCase = [context["""title"""] for context in dpr_record["""positive_ctxs"""]]
eval_file.write(question + """\n""" )
gold_file.write("""\t""".join(lowerCAmelCase ) + """\n""" )
if __name__ == "__main__":
main()
| 70 | 1 |
from string import ascii_uppercase
UpperCAmelCase_ : Dict = {char: i for i, char in enumerate(ascii_uppercase)}
UpperCAmelCase_ : Optional[int] = dict(enumerate(ascii_uppercase))
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : str ) -> str:
"""simple docstring"""
a_ : Tuple = len(__A )
a_ : int = 0
while True:
if x == i:
a_ : Tuple = 0
if len(__A ) == len(__A ):
break
key += key[i]
i += 1
return key
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : str ) -> str:
"""simple docstring"""
a_ : Optional[int] = ''
a_ : Any = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
a_ : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : str ) -> str:
"""simple docstring"""
a_ : Any = ''
a_ : Optional[Any] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
a_ : Union[str, Any] = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def SCREAMING_SNAKE_CASE_ ( ) -> None:
"""simple docstring"""
a_ : Tuple = 'THE GERMAN ATTACK'
a_ : Dict = 'SECRET'
a_ : Optional[Any] = generate_key(__A , __A )
a_ : Union[str, Any] = cipher_text(__A , __A )
print(F"""Encrypted Text = {s}""" )
print(F"""Original Text = {original_text(__A , __A )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
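# Working the first word of the demo by hand (message "THE", running key "SEC"):
# T(19) - S(18) = 1 -> 'B'; H(7) - E(4) = 3 -> 'D'; E(4) - C(2) = 2 -> 'C'.
from string import ascii_uppercase as _uc_demo

_idx = {c: i for i, c in enumerate(_uc_demo)}
assert "".join(
    _uc_demo[(_idx[m] - _idx[k]) % 26] for m, k in [("T", "S"), ("H", "E"), ("E", "C")]
) == "BDC"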
| 120 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : int = '''instructblip_vision_model'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=1_4_0_8 , SCREAMING_SNAKE_CASE__ : int=6_1_4_4 , SCREAMING_SNAKE_CASE__ : Dict=3_9 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_2_4 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_4 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : List[str]=1E-6 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=1E-10 , SCREAMING_SNAKE_CASE__ : str=True , **SCREAMING_SNAKE_CASE__ : Dict , ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE__ )
a_ : List[str] = hidden_size
a_ : Any = intermediate_size
a_ : str = num_hidden_layers
a_ : Dict = num_attention_heads
a_ : str = patch_size
a_ : Any = image_size
a_ : Dict = initializer_range
a_ : List[Any] = attention_dropout
a_ : Union[str, Any] = layer_norm_eps
a_ : Optional[Any] = hidden_act
a_ : Optional[Any] = qkv_bias
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
a_ , a_ : Dict = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
a_ : List[Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : List[Any] = '''instructblip_qformer'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : str=7_6_8 , SCREAMING_SNAKE_CASE__ : List[str]=1_2 , SCREAMING_SNAKE_CASE__ : Any=1_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_0_7_2 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE__ : int=1E-12 , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]="absolute" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : str=1_4_0_8 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = vocab_size
a_ : List[str] = hidden_size
a_ : Any = num_hidden_layers
a_ : Dict = num_attention_heads
a_ : Optional[int] = hidden_act
a_ : List[str] = intermediate_size
a_ : Tuple = hidden_dropout_prob
a_ : Union[str, Any] = attention_probs_dropout_prob
a_ : List[str] = max_position_embeddings
a_ : List[str] = initializer_range
a_ : Any = layer_norm_eps
a_ : Tuple = position_embedding_type
a_ : List[str] = cross_attention_frequency
a_ : Union[str, Any] = encoder_hidden_size
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
a_ , a_ : Union[str, Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
a_ : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Union[str, Any] = '''instructblip'''
snake_case__ : List[str] = True
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : int=3_2 , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE__ )
if vision_config is None:
a_ : Dict = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
a_ : List[str] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
a_ : Any = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
a_ : int = InstructBlipVisionConfig(**SCREAMING_SNAKE_CASE__ )
a_ : int = InstructBlipQFormerConfig(**SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
a_ : List[Any] = CONFIG_MAPPING[text_model_type](**SCREAMING_SNAKE_CASE__ )
a_ : Tuple = self.text_config.tie_word_embeddings
a_ : List[Any] = self.text_config.is_encoder_decoder
a_ : Optional[Any] = num_query_tokens
a_ : int = self.vision_config.hidden_size
a_ : List[str] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
a_ : Optional[int] = 1.0
a_ : Any = 0.02
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : InstructBlipVisionConfig , SCREAMING_SNAKE_CASE__ : InstructBlipQFormerConfig , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
a_ : List[Any] = copy.deepcopy(self.__dict__ )
a_ : Optional[int] = self.vision_config.to_dict()
a_ : Optional[Any] = self.qformer_config.to_dict()
a_ : Union[str, Any] = self.text_config.to_dict()
a_ : Optional[Any] = self.__class__.model_type
return output
| 120 | 1 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__magic_name__: Optional[int] = logging.get_logger(__name__)
def UpperCamelCase ( _A, _A, _A, _A=False ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
__magic_name__ : List[str] = os.path.abspath(_A )
logger.info(f'Loading PyTorch weights from {pt_path}' )
__magic_name__ : str = torch.load(_A, map_location="""cpu""" )
logger.info(f'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
__magic_name__ : Optional[int] = convert_pytorch_state_dict_to_flax(_A, _A )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__magic_name__ : Union[str, Any] = convert_pytorch_sharded_state_dict_to_flax(_A, _A )
return flax_state_dict
def UpperCamelCase ( _A, _A, _A, _A, ):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(_A ) -> bool:
return len(set(_A ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__magic_name__ : Union[str, Any] = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__magic_name__ : List[Any] = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__magic_name__ : Tuple = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__magic_name__ : Tuple = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__magic_name__ : List[Any] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_A ):
__magic_name__ : Union[str, Any] = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__magic_name__ : Dict = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_A ):
__magic_name__ : Tuple = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__magic_name__ : str = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__magic_name__ : Dict = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__magic_name__ : Optional[Any] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__magic_name__ : Dict = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__magic_name__ : Optional[Any] = pt_tuple_key[-2] + """_v"""
if name is not None:
__magic_name__ : Optional[int] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
__magic_name__ : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__magic_name__ : str = flax_model.params["""params"""]
else:
__magic_name__ : List[Any] = flax_model.params
__magic_name__ : Dict = flatten_dict(_A )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__magic_name__ : Union[str, Any] = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(_A )
__magic_name__ : Optional[Any] = {}
__magic_name__ : str = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
__magic_name__ : List[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__magic_name__ : int = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
__magic_name__ : Optional[int] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__magic_name__ : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
__magic_name__ ,__magic_name__ : List[str] = rename_key_and_reshape_tensor(
_A, _A, _A, _A )
# add model prefix if necessary
__magic_name__ : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__magic_name__ : List[str] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__magic_name__ : str = jnp.asarray(_A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_A, _A )
continue
# also add unexpected weight so that warning is thrown
__magic_name__ : Optional[Any] = jnp.asarray(_A )
else:
# also add unexpected weight so that warning is thrown
__magic_name__ : Tuple = jnp.asarray(_A )
return unflatten_dict(_A )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
import torch
# Load the index
__magic_name__ : Union[str, Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
__magic_name__ : Optional[int] = torch.load(_A )
__magic_name__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
__magic_name__ : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__magic_name__ : List[str] = flax_model.params["""params"""]
__magic_name__ : Optional[Any] = flatten_dict(_A )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
__magic_name__ : List[str] = flax_model.params
__magic_name__ : Optional[Any] = flatten_dict(_A )
__magic_name__ : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
__magic_name__ : Optional[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__magic_name__ : int = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
__magic_name__ : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__magic_name__ : Optional[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
__magic_name__ ,__magic_name__ : str = rename_key_and_reshape_tensor(
_A, _A, _A, _A )
# add model prefix if necessary
__magic_name__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__magic_name__ : str = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__magic_name__ : Optional[int] = jnp.asarray(_A )
continue
if "var" in flax_key[-1]:
__magic_name__ : str = jnp.asarray(_A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_A, _A )
continue
# also add unexpected weight so that warning is thrown
__magic_name__ : int = jnp.asarray(_A )
else:
# also add unexpected weight so that warning is thrown
__magic_name__ : List[str] = jnp.asarray(_A )
return unflatten_dict(_A )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Dict = os.path.abspath(_A )
logger.info(f'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
__magic_name__ : Union[str, Any] = getattr(_A, """Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(_A, """rb""" ) as state_f:
try:
__magic_name__ : List[Any] = from_bytes(_A, state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_A, _A )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
__magic_name__ : Optional[Any] = flatten_dict(jax.tree_util.tree_map(lambda _A : _A.dtype == jnp.bfloataa, _A ) ).values()
if any(_A ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
__magic_name__ : Optional[Any] = jax.tree_util.tree_map(
lambda _A : _A.astype(np.floataa ) if _A.dtype == jnp.bfloataa else _A, _A )
__magic_name__ : List[Any] = flatten_dict(_A )
__magic_name__ : Tuple = pt_model.state_dict()
__magic_name__ : Any = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
__magic_name__ : Union[str, Any] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__magic_name__ : int = []
__magic_name__ : List[str] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__magic_name__ : int = flax_key_tuple[0] == pt_model.base_model_prefix
__magic_name__ : Optional[Any] = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__magic_name__ : Dict = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__magic_name__ : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_A ) not in pt_model_dict:
# conv layer
__magic_name__ : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
__magic_name__ : List[Any] = jnp.transpose(_A, (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_A ) not in pt_model_dict:
# linear layer
__magic_name__ : str = flax_key_tuple[:-1] + ("""weight""",)
__magic_name__ : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__magic_name__ : Any = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__magic_name__ : List[Any] = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
__magic_name__ : Optional[int] = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
__magic_name__ : List[str] = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__magic_name__ : List[Any] = """.""".join(_A )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__magic_name__ : Optional[int] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__magic_name__ : str = key.split(""".""" )
__magic_name__ : List[Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__magic_name__ : Dict = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
__magic_name__ : List[str] = key_components[-2] + """_v"""
if name is not None:
__magic_name__ : Dict = key_components[:-3] + [name]
__magic_name__ : List[str] = """.""".join(_A )
__magic_name__ : List[Any] = key
if flax_key in special_pt_names:
__magic_name__ : Any = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__magic_name__ : int = np.asarray(_A ) if not isinstance(_A, np.ndarray ) else flax_tensor
__magic_name__ : Optional[Any] = torch.from_numpy(_A )
# remove from missing keys
missing_keys.remove(_A )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_A )
pt_model.load_state_dict(_A )
# re-transform missing_keys to list
__magic_name__ : int = list(_A )
if len(_A ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_A ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
""" use it for predictions and inference.""" )
else:
logger.warning(
f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"""If your task is similar to the task the model of the checkpoint was trained on, """
f'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
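# For reference, a minimal sketch of the two shape conventions the renaming
# helpers above translate between (array shapes are illustrative):
import numpy as _np_demo

_pt_linear = _np_demo.zeros((4, 3))      # PyTorch Linear weight: (out_features, in_features)
assert _pt_linear.T.shape == (3, 4)      # Flax Dense "kernel": (in_features, out_features)
_pt_conv = _np_demo.zeros((8, 3, 5, 5))  # PyTorch Conv2d weight: (out_ch, in_ch, kh, kw)
assert _pt_conv.transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)  # Flax kernel: (kh, kw, in_ch, out_ch)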
| 342 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__: Tuple = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Union[str, Any] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__magic_name__: Optional[Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__magic_name__: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 342 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def UpperCamelCase_ (self ):
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCamelCase_ (self ):
"""simple docstring"""
a = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(UpperCamelCase__ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self._create_example_records()
a = Dataset.from_list(UpperCamelCase__ )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(UpperCamelCase__ ):
self.assertDictEqual(UpperCamelCase__ , example_records[i] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self._create_example_records()
a = Dataset.from_list(UpperCamelCase__ )
a = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCamelCase_ (self ): # checks what happens with missing columns
"""simple docstring"""
a = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
a = Dataset.from_list(UpperCamelCase__ )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def UpperCamelCase_ (self ): # checks if the type can be inferred from the second record
"""simple docstring"""
a = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
a = Dataset.from_list(UpperCamelCase__ )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = Dataset.from_list([] )
self.assertEqual(len(UpperCamelCase__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 366 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = LxmertTokenizer
__A = LxmertTokenizerFast
__A = True
__A = True
def UpperCamelCase_ (self ):
"""simple docstring"""
super().setUp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = "UNwant\u00E9d,running"
a = "unwanted, running"
return input_text, output_text
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.tokenizer_class(self.vocab_file )
a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCamelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [7, 4, 5, 10, 8, 9] )
def UpperCamelCase_ (self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = "I was born in 92000, and this is falsé."
a = tokenizer.tokenize(lowerCamelCase_ )
a = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
a = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
a = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
a = self.get_rust_tokenizer()
a = tokenizer.encode(lowerCamelCase_ )
a = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
| 71 | 0 |
"""simple docstring"""
class A_ :
"""simple docstring"""
def __init__( self :Tuple , lowercase_ :int ) -> Optional[int]:
UpperCAmelCase = n
UpperCAmelCase = [None] * self.n
UpperCAmelCase = 0 # index of the first element
UpperCAmelCase = 0
UpperCAmelCase = 0
def __len__( self :Tuple ) -> int:
return self.size
def UpperCAmelCase__ ( self :int ) -> bool:
return self.size == 0
def UpperCAmelCase__ ( self :Optional[int] ) -> str:
return False if self.is_empty() else self.array[self.front]
def UpperCAmelCase__ ( self :int , lowercase_ :int ) -> Dict:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
UpperCAmelCase = data
UpperCAmelCase = (self.rear + 1) % self.n
self.size += 1
return self
def UpperCAmelCase__ ( self :str ) -> Tuple:
if self.size == 0:
raise Exception('UNDERFLOW' )
UpperCAmelCase = self.array[self.front]
UpperCAmelCase = None
UpperCAmelCase = (self.front + 1) % self.n
self.size -= 1
return temp
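# The obfuscation above reuses a single name for is_empty/first/enqueue/dequeue,
# so only the last definition would survive at runtime. A de-obfuscated sketch
# (class and method names are my reconstruction) with a small usage check:
class _CircularQueueDemo:
    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp


_q = _CircularQueueDemo(3)
_q.enqueue("a").enqueue("b").enqueue("c")  # rear wraps modulo n
assert _q.dequeue() == "a" and _q.first() == "b" and len(_q) == 2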
| 78 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ , lowercase_ = " " ):
UpperCAmelCase = []
UpperCAmelCase = 0
for index, char in enumerate(lowercase_ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCAmelCase = index + 1
elif index + 1 == len(lowercase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
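# Usage of the splitter above (called by its obfuscated name), mirroring the
# doctests the original likely carried:
assert _lowerCAmelCase("apple#banana#cherry#orange", "#") == ["apple", "banana", "cherry", "orange"]
assert _lowerCAmelCase("Hello there") == ["Hello", "there"]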
| 78 | 1 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
UpperCAmelCase__ : Union[str, Any] ='''src/transformers'''
UpperCAmelCase__ : Optional[Any] ='''docs/source/en'''
UpperCAmelCase__ : Any ='''.'''
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
with open(_UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase =f.readlines()
# Find the start prompt.
lowerCamelCase =0
while not lines[start_index].startswith(_UpperCAmelCase ):
start_index += 1
start_index += 1
lowerCamelCase =start_index
while not lines[end_index].startswith(_UpperCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
UpperCAmelCase__ : str ='''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
UpperCAmelCase__ : Dict =re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
UpperCAmelCase__ : List[Any] =re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too, so they need to be in an else branch after the two previous regexes.
UpperCAmelCase__ : Any =re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ : Dict =direct_transformers_import(TRANSFORMERS_PATH)
def _lowercase ( _UpperCAmelCase ) -> Dict:
lowerCamelCase =re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , _UpperCAmelCase )
return [m.group(0 ) for m in matches]
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
lowerCamelCase =2 if text == """✅""" or text == """❌""" else len(_UpperCAmelCase )
lowerCamelCase =(width - text_length) // 2
lowerCamelCase =width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _lowercase ( ) -> Optional[int]:
lowerCamelCase =transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowerCamelCase ={
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowerCamelCase ={name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
lowerCamelCase =collections.defaultdict(_UpperCAmelCase )
lowerCamelCase =collections.defaultdict(_UpperCAmelCase )
lowerCamelCase =collections.defaultdict(_UpperCAmelCase )
lowerCamelCase =collections.defaultdict(_UpperCAmelCase )
lowerCamelCase =collections.defaultdict(_UpperCAmelCase )
# Let's lookup through all transformers object (once).
for attr_name in dir(_UpperCAmelCase ):
lowerCamelCase =None
if attr_name.endswith("""Tokenizer""" ):
lowerCamelCase =slow_tokenizers
lowerCamelCase =attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
lowerCamelCase =fast_tokenizers
lowerCamelCase =attr_name[:-13]
elif _re_tf_models.match(_UpperCAmelCase ) is not None:
lowerCamelCase =tf_models
lowerCamelCase =_re_tf_models.match(_UpperCAmelCase ).groups()[0]
elif _re_flax_models.match(_UpperCAmelCase ) is not None:
lowerCamelCase =flax_models
lowerCamelCase =_re_flax_models.match(_UpperCAmelCase ).groups()[0]
elif _re_pt_models.match(_UpperCAmelCase ) is not None:
lowerCamelCase =pt_models
lowerCamelCase =_re_pt_models.match(_UpperCAmelCase ).groups()[0]
if lookup_dict is not None:
while len(_UpperCAmelCase ) > 0:
if attr_name in model_name_to_prefix.values():
lowerCamelCase =True
break
# Try again after removing the last word in the name
lowerCamelCase ="""""".join(camel_case_split(_UpperCAmelCase )[:-1] )
# Let's build that table!
lowerCamelCase =list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
lowerCamelCase =["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowerCamelCase =[len(_UpperCAmelCase ) + 2 for c in columns]
lowerCamelCase =max([len(_UpperCAmelCase ) for name in model_names] ) + 2
# Build the table per se
lowerCamelCase ="""|""" + """|""".join([_center_text(_UpperCAmelCase , _UpperCAmelCase ) for c, w in zip(_UpperCAmelCase , _UpperCAmelCase )] ) + """|\n"""
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
lowerCamelCase ={True: """✅""", False: """❌"""}
for name in model_names:
lowerCamelCase =model_name_to_prefix[name]
lowerCamelCase =[
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(_UpperCAmelCase , _UpperCAmelCase ) for l, w in zip(_UpperCAmelCase , _UpperCAmelCase )] ) + "|\n"
return table
def _lowercase ( _UpperCAmelCase=False ) -> Optional[int]:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =_find_text_in_file(
filename=os.path.join(_UpperCAmelCase , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
lowerCamelCase =get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(_UpperCAmelCase , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
UpperCAmelCase__ : Any =argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCAmelCase__ : List[Any] =parser.parse_args()
check_model_table(args.fix_and_overwrite)
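# A quick illustration of the `camel_case_split` helper defined above
# (regex copied verbatim; the model name is just an example):
import re as _re_cc_demo

_parts = [
    m.group(0)
    for m in _re_cc_demo.finditer(
        ".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", "FlaxBertForSequenceClassification"
    )
]
assert _parts == ["Flax", "Bert", "For", "Sequence", "Classification"]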
| 363 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( a , unittest.TestCase ):
__A = BioGptTokenizer
__A = False
def _snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase =[
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowerCamelCase =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCamelCase =["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
lowerCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase ="""lower newer"""
lowerCamelCase ="""lower newer"""
return input_text, output_text
def _snake_case ( self ):
lowerCamelCase =BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase ="""lower"""
lowerCamelCase =["""low""", """er</w>"""]
lowerCamelCase =tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase =tokens + ["""<unk>"""]
lowerCamelCase =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
@slow
def _snake_case ( self ):
lowerCamelCase =BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCamelCase =tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCAmelCase_ )
lowerCamelCase =tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCAmelCase_ )
lowerCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 262 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=100 , _lowerCAmelCase=13 , _lowerCAmelCase=30 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=10 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , ) -> Optional[int]:
_lowerCAmelCase = parent
_lowerCAmelCase = vocab_size
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase = (image_size // patch_size) ** 2
_lowerCAmelCase = num_patches + 1
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = FlaxBeitModel(config=lowercase_ )
_lowerCAmelCase = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase = FlaxBeitForMaskedImageModeling(config=lowercase_ )
_lowerCAmelCase = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
_lowerCAmelCase = self.type_sequence_label_size
_lowerCAmelCase = FlaxBeitForImageClassification(config=lowercase_ )
_lowerCAmelCase = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = FlaxBeitForImageClassification(lowercase_ )
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(lowercase_ )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.prepare_config_and_inputs()
(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase
) = config_and_inputs
_lowerCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class lowerCAmelCase_ ( a_ ,unittest.TestCase ):
__lowerCamelCase : str = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = FlaxBeitModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def _snake_case ( self ) -> str:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(lowercase_ )
_lowerCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase = self._prepare_for_class(lowercase_ , lowercase_ )
_lowerCAmelCase = model_class(lowercase_ )
@jax.jit
def model_jitted(_lowerCAmelCase , **_lowerCAmelCase ):
return model(pixel_values=lowercase_ , **lowercase_ )
with self.subTest("JIT Enabled" ):
_lowerCAmelCase = model_jitted(**lowercase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_lowerCAmelCase = model_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self ) -> int:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def _snake_case ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
_lowerCAmelCase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
_lowerCAmelCase = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowercase_ )
def __a():
'''simple docstring'''
_lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> Optional[Any]:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=lowercase_ , return_tensors="np" ).pixel_values
# prepare bool_masked_pos
_lowerCAmelCase = np.ones((1, 196) , dtype=lowercase_ )
# forward pass
_lowerCAmelCase = model(pixel_values=lowercase_ , bool_masked_pos=lowercase_ )
_lowerCAmelCase = outputs.logits
# verify the logits
_lowerCAmelCase = (1, 196, 8192)
self.assertEqual(logits.shape , lowercase_ )
_lowerCAmelCase = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , lowercase_ , atol=1E-2 ) )
@slow
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=lowercase_ , return_tensors="np" )
# forward pass
_lowerCAmelCase = model(**lowercase_ )
_lowerCAmelCase = outputs.logits
# verify the logits
_lowerCAmelCase = (1, 1000)
self.assertEqual(logits.shape , lowercase_ )
_lowerCAmelCase = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) )
_lowerCAmelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , lowercase_ )
@slow
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=lowercase_ , return_tensors="np" )
# forward pass
_lowerCAmelCase = model(**lowercase_ )
_lowerCAmelCase = outputs.logits
# verify the logits
_lowerCAmelCase = (1, 21841)
self.assertEqual(logits.shape , lowercase_ )
_lowerCAmelCase = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) )
_lowerCAmelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , lowercase_ )
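    # Hedged end-to-end sketch (not one of the tests above; it reuses the
    # checkpoints already cited in this file and assumes a PIL image):
    #   processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    #   model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
    #   inputs = processor(images=prepare_img(), return_tensors="np")
    #   predicted_class = model(**inputs).logits.argmax(-1).item()  # 281 == "tabby, tabby cat"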
| 158 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
    def __init__( self ,dataset ,process ,params ):
        self.dataset = dataset
        self.process = process
        self.params = params
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self ,i ):
        item = self.dataset[i]
        processed = self.process(item ,**self.params )
        return processed
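# Minimal usage sketch (illustrative; the lambda is a hypothetical stand-in
# for a pipeline's preprocess step). The dataset defers `process` to item
# access, so it composes with a plain torch DataLoader:
#   from torch.utils.data import DataLoader
#   ds = PipelineDataset(["a", "bb", "ccc"], lambda text, **kw: {"length": len(text)}, {})
#   batches = list(DataLoader(ds, batch_size=2))  # [{'length': tensor([1, 2])}, {'length': tensor([3])}]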
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase_ : Optional[Any] ,lowercase_ : List[Any] ,lowercase_ : Optional[Any] ,lowercase_ : Tuple=None ):
lowerCAmelCase__ : List[Any] = loader
lowerCAmelCase__ : int = infer
lowerCAmelCase__ : List[str] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
lowerCAmelCase__ : int = None
lowerCAmelCase__ : Dict = loader_batch_size
# Internal bookkeeping
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Optional[int] = None
def __len__( self : Union[str, Any] ):
return len(self.loader )
def __iter__( self : List[Any] ):
lowerCAmelCase__ : List[Any] = iter(self.loader )
return self
def __lowerCAmelCase ( self : Tuple ):
if isinstance(self._loader_batch_data ,torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
lowerCAmelCase__ : Tuple = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
lowerCAmelCase__ : int = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowercase_ ,lowercase_ ):
# Convert ModelOutput to tuple first
lowerCAmelCase__ : List[Any] = element.to_tuple()
if isinstance(element[0] ,torch.Tensor ):
lowerCAmelCase__ : List[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
lowerCAmelCase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowercase_ ,lowercase_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] ,torch.Tensor ):
lowerCAmelCase__ : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
lowerCAmelCase__ : Optional[int] = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
lowerCAmelCase__ : Dict = None
elif isinstance(element[self._loader_batch_index] ,torch.Tensor ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
lowerCAmelCase__ : str = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] ,np.ndarray ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
lowerCAmelCase__ : Tuple = np.expand_dims(element[self._loader_batch_index] ,0 )
else:
# This is typically a list, so no need to `unsqueeze`.
lowerCAmelCase__ : int = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
lowerCAmelCase__ : int = self._loader_batch_data.__class__(lowercase_ )
self._loader_batch_index += 1
return result
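    # Worked example of the unrolling above (shapes are illustrative): if the
    # underlying loader yields {"logits": tensor of shape (4, 10)} and
    # loader_batch_size is 4, four successive calls return
    # {"logits": tensor of shape (1, 10)}, advancing _loader_batch_index 0 -> 4.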
def __lowerCAmelCase ( self : Optional[int] ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
lowerCAmelCase__ : Dict = next(self.iterator )
lowerCAmelCase__ : List[Any] = self.infer(lowercase_ ,**self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowercase_ ,torch.Tensor ):
lowerCAmelCase__ : int = processed
else:
lowerCAmelCase__ : Union[str, Any] = list(processed.keys() )[0]
lowerCAmelCase__ : Union[str, Any] = processed[key]
if isinstance(lowercase_ ,lowercase_ ):
lowerCAmelCase__ : List[Any] = len(lowercase_ )
else:
lowerCAmelCase__ : List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCAmelCase__ : Optional[Any] = observed_batch_size
# Setting internal index to unwrap the batch
lowerCAmelCase__ : str = processed
lowerCAmelCase__ : Any = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
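# Hedged usage sketch: pipelines wrap a DataLoader in the iterator above
# (`PipelineIterator` in transformers) so that batched model calls are
# transparently unrolled back into per-item outputs (names illustrative):
#   it = PipelineIterator(dataloader, forward_fn, {}, loader_batch_size=8)
#   for item in it:
#       ...  # one logical sample at a time, even though forward_fn saw batches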
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : int ,lowercase_ : str ,lowercase_ : str ,lowercase_ : Union[str, Any] ,lowercase_ : int=None ):
super().__init__(lowercase_ ,lowercase_ ,lowercase_ )
def __iter__( self : List[Any] ):
lowerCAmelCase__ : Dict = iter(self.loader )
lowerCAmelCase__ : Tuple = None
return self
def __lowerCAmelCase ( self : Optional[int] ):
if self.subiterator is None:
lowerCAmelCase__ : List[Any] = self.infer(next(self.iterator ) ,**self.params )
try:
# Try to return next item
lowerCAmelCase__ : Optional[int] = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item;
            # the ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
lowerCAmelCase__ : Any = self.infer(next(self.iterator ) ,**self.params )
lowerCAmelCase__ : int = next(self.subiterator )
return processed
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __iter__( self : Tuple ):
lowerCAmelCase__ : int = iter(self.loader )
return self
def __lowerCAmelCase ( self : List[Any] ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator`, so we
        # need to keep track of how to regroup here along the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes them on to the caller.
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : str = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
lowerCAmelCase__ : Dict = self.loader_batch_item()
lowerCAmelCase__ : Optional[Any] = item.pop('''is_last''' )
accumulator.append(lowercase_ )
if is_last:
return accumulator
while not is_last:
lowerCAmelCase__ : Any = self.infer(next(self.iterator ) ,**self.params )
if self.loader_batch_size is not None:
if isinstance(lowercase_ ,torch.Tensor ):
lowerCAmelCase__ : Tuple = processed
else:
lowerCAmelCase__ : List[Any] = list(processed.keys() )[0]
lowerCAmelCase__ : Union[str, Any] = processed[key]
if isinstance(lowercase_ ,lowercase_ ):
lowerCAmelCase__ : Tuple = len(lowercase_ )
else:
lowerCAmelCase__ : str = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCAmelCase__ : Optional[int] = observed_batch_size
lowerCAmelCase__ : Optional[int] = processed
lowerCAmelCase__ : Optional[int] = 0
while self._loader_batch_index < self.loader_batch_size:
lowerCAmelCase__ : Any = self.loader_batch_item()
lowerCAmelCase__ : Optional[Any] = item.pop('''is_last''' )
accumulator.append(lowercase_ )
if is_last:
return accumulator
else:
lowerCAmelCase__ : Dict = processed
lowerCAmelCase__ : Tuple = item.pop('''is_last''' )
accumulator.append(lowercase_ )
return accumulator
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
    def __init__( self ,dataset: Dataset ,key: str ):
        self.dataset = dataset
        self.key = key
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self ,i: int ):
        return self.dataset[i][self.key]
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
    def __init__( self ,dataset: Dataset ,keya: str ,keyb: str ):
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self ,i: int ):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 106 | 0 |
'''simple docstring'''
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    octets = [int(i) for i in ip_va_address.split(""".""" ) if i.isdigit()]
    # a valid IPv4 address has exactly four octets, each in the range 0-255
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
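# A few illustrative checks for the validator above:
#   assert is_ip_va_address_valid("192.168.0.23")
#   assert not is_ip_va_address_valid("192.256.15.8")   # 256 is out of range
#   assert not is_ip_va_address_valid("172.100.0.8.1")  # five octets
#   assert not is_ip_va_address_valid("3.69.94.a")      # non-digit octet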
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''') | 270 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    '''simple docstring'''
    def __init__(self , img , dst_width: int , dst_height: int ) -> None:
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("""Destination width/height should be > 0""" )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )
    def process(self ) -> None:
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x(self , x: int ) -> int:
        return int(self.ratio_x * x )
    def get_y(self , y: int ) -> int:
        return int(self.ratio_y * y )
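    # Worked example of the index mapping above: scaling a 4x4 source to 8x8
    # gives ratio_x = ratio_y = 0.5, so destination pixel (i=5, j=3) samples
    # source pixel (get_y(5), get_x(3)) = (2, 1), a plain nearest-neighbour
    # lookup with no interpolation.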
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
    )
    waitKey(0)
destroyAllWindows() | 270 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main():
    parser = ArgumentParser(
        """HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help="""datasets-cli command helpers""" )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
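# Hedged usage note: this is the entry point behind the `datasets-cli` console
# script, e.g. (subcommand flags are illustrative; see each command's
# `register_subcommand` for the real ones):
#   $ datasets-cli env
#   $ datasets-cli test ./my_dataset --all_configs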
| 58 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowercase_ = logging.get_logger(__name__)
# General docstring
lowercase_ = """PoolFormerConfig"""
# Base docstring
lowercase_ = """sail/poolformer_s12"""
lowercase_ = [1, 512, 7, 7]
# Image classification docstring
lowercase_ = """sail/poolformer_s12"""
lowercase_ = """tabby, tabby cat"""
lowercase_ = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor , drop_prob: float = 0.0 , training: bool = False ) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob ) * random_tensor
    return output
class a_ ( nn.Module ):
'''simple docstring'''
    def __init__( self , drop_prob = None ) -> None:
        super().__init__()
        self.drop_prob = drop_prob
def snake_case_( self , A ) -> torch.Tensor:
return drop_path(A , self.drop_prob , self.training )
def snake_case_( self ) -> str:
return "p={}".format(self.drop_prob )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A , A , A=None ) -> Union[str, Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = patch_size if isinstance(A , collections.abc.Iterable ) else (patch_size, patch_size)
_SCREAMING_SNAKE_CASE = stride if isinstance(A , collections.abc.Iterable ) else (stride, stride)
_SCREAMING_SNAKE_CASE = padding if isinstance(A , collections.abc.Iterable ) else (padding, padding)
_SCREAMING_SNAKE_CASE = nn.Convad(A , A , kernel_size=A , stride=A , padding=A )
_SCREAMING_SNAKE_CASE = norm_layer(A ) if norm_layer else nn.Identity()
def snake_case_( self , A ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.projection(A )
_SCREAMING_SNAKE_CASE = self.norm(A )
return embeddings
class a_ ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , A , **A ) -> Union[str, Any]:
super().__init__(1 , A , **A )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> Union[str, Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.AvgPoolad(A , stride=1 , padding=pool_size // 2 , count_include_pad=A )
def snake_case_( self , A ) -> Union[str, Any]:
return self.pool(A ) - hidden_states
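    # Note on the subtraction above: following the PoolFormer paper, the token
    # mixer computes AvgPool(x) - x because the surrounding layer adds a
    # residual connection anyway; subtracting the input avoids counting the
    # identity path twice.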
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.Convad(A , A , 1 )
_SCREAMING_SNAKE_CASE = nn.Convad(A , A , 1 )
_SCREAMING_SNAKE_CASE = PoolFormerDropPath(A )
if isinstance(config.hidden_act , A ):
_SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
else:
_SCREAMING_SNAKE_CASE = config.hidden_act
def snake_case_( self , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.conva(A )
_SCREAMING_SNAKE_CASE = self.act_fn(A )
_SCREAMING_SNAKE_CASE = self.drop(A )
_SCREAMING_SNAKE_CASE = self.conva(A )
_SCREAMING_SNAKE_CASE = self.drop(A )
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A , A , A ) -> Union[str, Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = PoolFormerPooling(A )
_SCREAMING_SNAKE_CASE = PoolFormerOutput(A , A , A , A )
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(A )
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(A )
# Useful for training neural nets
_SCREAMING_SNAKE_CASE = PoolFormerDropPath(A ) if drop_path > 0.0 else nn.Identity()
_SCREAMING_SNAKE_CASE = config.use_layer_scale
if config.use_layer_scale:
_SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((A) ) , requires_grad=A )
_SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((A) ) , requires_grad=A )
def snake_case_( self , A ) -> Optional[Any]:
if self.use_layer_scale:
_SCREAMING_SNAKE_CASE = self.pooling(self.before_norm(A ) )
_SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(A )
_SCREAMING_SNAKE_CASE = ()
_SCREAMING_SNAKE_CASE = self.output(self.after_norm(A ) )
_SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(A )
_SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
else:
_SCREAMING_SNAKE_CASE = self.drop_path(self.pooling(self.before_norm(A ) ) )
# First residual connection
_SCREAMING_SNAKE_CASE = pooling_output + hidden_states
_SCREAMING_SNAKE_CASE = ()
# Second residual connection inside the PoolFormerOutput block
_SCREAMING_SNAKE_CASE = self.drop_path(self.output(self.after_norm(A ) ) )
_SCREAMING_SNAKE_CASE = hidden_states + layer_output
_SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> Any:
super().__init__()
_SCREAMING_SNAKE_CASE = config
# stochastic depth decay rule
_SCREAMING_SNAKE_CASE = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_SCREAMING_SNAKE_CASE = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_SCREAMING_SNAKE_CASE = nn.ModuleList(A )
# Transformer blocks
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_SCREAMING_SNAKE_CASE = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(A ) )
_SCREAMING_SNAKE_CASE = nn.ModuleList(A )
def snake_case_( self , A , A=False , A=True ) -> List[Any]:
_SCREAMING_SNAKE_CASE = () if output_hidden_states else None
_SCREAMING_SNAKE_CASE = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = layers
# Get patch embeddings from hidden_states
_SCREAMING_SNAKE_CASE = embedding_layer(A )
# Send the embeddings through the blocks
for _, blk in enumerate(A ):
_SCREAMING_SNAKE_CASE = blk(A )
_SCREAMING_SNAKE_CASE = layer_outputs[0]
if output_hidden_states:
_SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=A , hidden_states=A )
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = PoolFormerConfig
UpperCamelCase = '''poolformer'''
UpperCamelCase = '''pixel_values'''
UpperCamelCase = True
def snake_case_( self , A ) -> int:
if isinstance(A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def snake_case_( self , A , A=False ) -> Dict:
if isinstance(A , A ):
_SCREAMING_SNAKE_CASE = value
lowercase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowercase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , snake_case_ , )
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , A ) -> int:
super().__init__(A )
_SCREAMING_SNAKE_CASE = config
_SCREAMING_SNAKE_CASE = PoolFormerEncoder(A )
# Initialize weights and apply final processing
self.post_init()
def snake_case_( self ) -> Any:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case_( self , A = None , A = None , A = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
_SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_SCREAMING_SNAKE_CASE = self.encoder(
A , output_hidden_states=A , return_dict=A , )
_SCREAMING_SNAKE_CASE = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=A , hidden_states=encoder_outputs.hidden_states , )
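    # Hedged usage sketch for this class (PoolFormerModel in transformers;
    # checkpoint from _CHECKPOINT_FOR_DOC above, `image` assumed to be a PIL image):
    #   from transformers import AutoImageProcessor
    #   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
    #   model = PoolFormerModel.from_pretrained("sail/poolformer_s12")
    #   out = model(**processor(images=image, return_tensors="pt"))
    #   out.last_hidden_state.shape  # torch.Size([1, 512, 7, 7]) per _EXPECTED_OUTPUT_SHAPE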
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.hidden_size )
def snake_case_( self , A ) -> str:
_SCREAMING_SNAKE_CASE = self.dense(A )
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , snake_case_ , )
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , A ) -> Optional[Any]:
super().__init__(A )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = PoolFormerModel(A )
# Final norm
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_SCREAMING_SNAKE_CASE = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case_( self , A = None , A = None , A = None , A = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE = self.poolformer(
A , output_hidden_states=A , return_dict=A , )
_SCREAMING_SNAKE_CASE = outputs[0]
_SCREAMING_SNAKE_CASE = self.classifier(self.norm(A ).mean([-2, -1] ) )
_SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
_SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
_SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_SCREAMING_SNAKE_CASE = loss_fct(A , A )
elif self.config.problem_type == "single_label_classification":
_SCREAMING_SNAKE_CASE = CrossEntropyLoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
_SCREAMING_SNAKE_CASE = loss_fct(A , A )
if not return_dict:
_SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A , logits=A , hidden_states=outputs.hidden_states )
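    # Note on the `problem_type` dispatch above (worked cases):
    #   num_labels == 1                         -> "regression", MSELoss on squeezed logits
    #   num_labels > 1 with integer labels      -> "single_label_classification", CrossEntropyLoss
    #   otherwise (e.g. float multi-hot labels) -> "multi_label_classification", BCEWithLogitsLoss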
| 58 | 1 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        raise NotImplementedError
    def run(self , trainer , n_trials: int , direction: str , **kwargs ):
        raise NotImplementedError
    def default_hp_space(self , trial ):
        raise NotImplementedError
    def ensure_available(self ):
        if not self.is_available():
            raise RuntimeError(
                f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
    @classmethod
    def pip_install(cls ):
        return f"""`pip install {cls.pip_package or cls.name}`"""
class OptunaBackend(HyperParamSearchBackendBase ):
    name = """optuna"""
    @staticmethod
    def is_available():
        return is_optuna_available()
    def run(self , trainer , n_trials: int , direction: str , **kwargs ):
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_optuna(trial )
class RayTuneBackend(HyperParamSearchBackendBase ):
    name = """ray"""
    pip_package = """'ray[tune]'"""
    @staticmethod
    def is_available():
        return is_ray_available()
    def run(self , trainer , n_trials: int , direction: str , **kwargs ):
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_ray(trial )
class SigOptBackend(HyperParamSearchBackendBase ):
    name = """sigopt"""
    @staticmethod
    def is_available():
        return is_sigopt_available()
    def run(self , trainer , n_trials: int , direction: str , **kwargs ):
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_sigopt(trial )
class WandbBackend(HyperParamSearchBackendBase ):
    name = """wandb"""
    @staticmethod
    def is_available():
        return is_wandb_available()
    def run(self , trainer , n_trials: int , direction: str , **kwargs ):
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_wandb(trial )
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    '''simple docstring'''
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f"""{len(available_backends )} hyperparameter search backends available. Using {name} as the default.""" )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f""" - To install {backend.name} run {backend.pip_install()}"""
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
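# Hedged usage sketch: Trainer.hyperparameter_search resolves its backend via
# the registry above, e.g. with optuna (the search space is illustrative):
#   def hp_space(trial):
#       return {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 1e-3, log=True)}
#   best = trainer.hyperparameter_search(hp_space=hp_space, backend="optuna", n_trials=10)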
| 106 | '''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    '''simple docstring'''
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data )
    return dataset
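# Note for the tests below: make_duplicate_clusters groups files whose MinHash
# Jaccard estimate exceeds the threshold (0.85 here), so the two "a "-repetition
# files land in one cluster while the "b " file stays out.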
class MakeDuplicateClustersTest(TestCase ):
    def test_make_duplicate_clusters(self ):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def test_deduplicate_dataset(self ):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , True ) | 106 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a__:
def __init__( self : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int]=13 , __snake_case : int=7 , __snake_case : Any=True , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : Optional[Any]=True , __snake_case : str=99 , __snake_case : int=64 , __snake_case : List[str]=5 , __snake_case : Tuple=4 , __snake_case : Any=37 , __snake_case : Any="gelu" , __snake_case : Any=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Union[str, Any]=5_12 , __snake_case : Dict=16 , __snake_case : int=2 , __snake_case : Optional[int]=0.02 , __snake_case : str=3 , __snake_case : List[Any]=4 , __snake_case : Optional[int]=None , ):
a : Optional[int] = parent
a : Tuple = batch_size
a : Optional[int] = seq_length
a : Optional[Any] = is_training
a : int = use_input_mask
a : Any = use_token_type_ids
a : str = use_labels
a : Tuple = vocab_size
a : Optional[Any] = hidden_size
a : int = num_hidden_layers
a : Dict = num_attention_heads
a : List[str] = intermediate_size
a : int = hidden_act
a : Union[str, Any] = hidden_dropout_prob
a : Dict = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : List[str] = type_vocab_size
a : List[str] = type_sequence_label_size
a : Dict = initializer_range
a : Dict = num_labels
a : int = num_choices
a : Optional[Any] = scope
a : str = vocab_size - 1
def lowercase_ ( self : Optional[Any] ):
a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : str = None
if self.use_input_mask:
a : Any = random_attention_mask([self.batch_size, self.seq_length] )
a : Tuple = None
if self.use_labels:
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase_ ( self : Optional[int] ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowercase_ ( self : str ):
a , a , a , a : Dict = self.prepare_config_and_inputs()
a : Dict = True
return config, input_ids, input_mask, token_labels
def lowercase_ ( self : List[str] , __snake_case : int , __snake_case : str , __snake_case : List[str] ):
a : List[Any] = GPTNeoXModel(config=__snake_case )
model.to(__snake_case )
model.eval()
a : int = model(__snake_case , attention_mask=__snake_case )
a : Tuple = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Any , __snake_case : Dict , __snake_case : int , __snake_case : Any ):
a : Optional[int] = True
a : Optional[int] = GPTNeoXModel(__snake_case )
model.to(__snake_case )
model.eval()
a : Any = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Optional[Any] , __snake_case : Dict , __snake_case : int , __snake_case : Any , __snake_case : Optional[Any] ):
a : Optional[int] = GPTNeoXForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
a : Optional[Any] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : Union[str, Any] , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : int ):
a : Optional[Any] = self.num_labels
a : List[str] = GPTNeoXForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
a : int = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Tuple , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : Tuple ):
a : Any = self.num_labels
a : Any = GPTNeoXForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : Optional[int] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : List[Any] , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[int] ):
a : int = self.num_labels
a : List[str] = GPTNeoXForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
a : List[str] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : int , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : str ):
a : Union[str, Any] = True
a : Optional[int] = GPTNeoXForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
# first forward pass
a : int = model(__snake_case , attention_mask=__snake_case , use_cache=__snake_case )
a : int = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
a : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
a : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
a : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
a : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
a : str = model(__snake_case , attention_mask=__snake_case , output_hidden_states=__snake_case )
a : Optional[Any] = output_from_no_past['hidden_states'][0]
a : List[str] = model(
__snake_case , attention_mask=__snake_case , past_key_values=__snake_case , output_hidden_states=__snake_case , )['hidden_states'][0]
# select random slice
a : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
a : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-3 ) )
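        # Worked note on the cache check above: with seq_length=7 plus 3 appended
        # tokens, the no-past forward sees 10 positions while the cached forward
        # sees only the 3 new ones; slicing the last 3 positions of the former
        # must match the latter to within 1e-3.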
def lowercase_ ( self : Dict ):
a : Optional[int] = self.prepare_config_and_inputs()
a , a , a , a : int = config_and_inputs
a : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowercase__ = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase_ ( self : int ):
a : Any = GPTNeoXModelTester(self )
a : List[str] = ConfigTester(self , config_class=__snake_case , hidden_size=64 , num_attention_heads=8 )
def lowercase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowercase_ ( self : List[Any] ):
a , a , a , a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__snake_case , __snake_case , __snake_case )
def lowercase_ ( self : Optional[Any] ):
a , a , a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__snake_case , __snake_case , __snake_case )
def lowercase_ ( self : Tuple ):
# This regression test was failing with PyTorch < 1.3
a , a , a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
a : str = None
self.model_tester.create_and_check_model_as_decoder(__snake_case , __snake_case , __snake_case )
def lowercase_ ( self : str ):
a , a , a , a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__snake_case , __snake_case , __snake_case )
def lowercase_ ( self : Optional[int] ):
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__snake_case )
def lowercase_ ( self : Tuple ):
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
def lowercase_ ( self : Optional[Any] ):
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def lowercase_ ( self : Any ):
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def lowercase_ ( self : str ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowercase_ ( self : List[Any] , __snake_case : Union[str, Any] ):
a , a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : str = ids_tensor([1, 10] , config.vocab_size )
a : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a : Union[str, Any] = GPTNeoXModel(__snake_case )
original_model.to(__snake_case )
original_model.eval()
a : Tuple = original_model(__snake_case ).last_hidden_state
a : Union[str, Any] = original_model(__snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a : Optional[Any] = {'type': scaling_type, 'factor': 10.0}
a : int = GPTNeoXModel(__snake_case )
scaled_model.to(__snake_case )
scaled_model.eval()
a : Union[str, Any] = scaled_model(__snake_case ).last_hidden_state
a : List[str] = scaled_model(__snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
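        # Hedged sketch of the rope_scaling config exercised above (documented keys):
        #   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 10.0})
        # "dynamic" only rescales once inputs exceed max_position_embeddings, which
        # is why the short-input outputs still match in the dynamic branch.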
@require_torch
class a__( unittest.TestCase ):
@slow
def lowercase_ ( self : str ):
a : Dict = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
a : Any = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__snake_case )
a : Dict = tokenizer('My favorite food is' , return_tensors='pt' ).to(__snake_case )
        # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
a : Optional[int] = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
a : Optional[Any] = model.generate(**__snake_case , do_sample=__snake_case , max_new_tokens=20 )
a : int = tokenizer.batch_decode(__snake_case )[0]
self.assertEqual(__snake_case , __snake_case ) | 297 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class a__( unittest.TestCase ):
@slow
def lowercase_ ( self : List[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Optional[int] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = TFAutoModel.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = AutoModel.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = AutoModelForPreTraining.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : int ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Any = TFAutoModelForCausalLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForCausalLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : Tuple = AutoModelForCausalLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Any ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[int] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = AutoModelForMaskedLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : Tuple = AutoModelForMaskedLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : int ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : str = AutoModelForSeqaSeqLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Tuple = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : int = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
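    # Hedged note on the cross-framework loads exercised throughout this class:
    # `from_pt=True` converts a PyTorch checkpoint while loading into TF, and
    # `from_tf=True` the reverse, e.g.:
    #   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
    #   pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)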
def lowercase_ ( self : Tuple ):
a : List[Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
a : Optional[int] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
def lowercase_ ( self : Any ):
a : int = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
a : Optional[Any] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) | 297 | 1 |
"""simple docstring"""
def binary_xor(a: int , b: int ) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
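# Worked example: binary_xor(25, 32) == "0b111001"
#   25 -> 011001 and 32 -> 100000 once zero-filled to the same width;
#   XOR per bit position gives 111001.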
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def snake_case__ ( __lowerCamelCase : torch.nn.Module , __lowerCamelCase : BnbQuantizationConfig , __lowerCamelCase : Union[str, os.PathLike] = None , __lowerCamelCase : Optional[Dict[str, Union[int, str, torch.device]]] = None , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : Optional[Dict[Union[int, str], Union[int, str]]] = None , __lowerCamelCase : Optional[Union[str, os.PathLike]] = None , __lowerCamelCase : bool = False , ):
"""simple docstring"""
lowerCamelCase__ : str =bnb_quantization_config.load_in_abit
lowerCamelCase__ : str =bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
lowerCamelCase__ : str =[]
# custom device map
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(device_map.keys() ) > 1:
lowerCamelCase__ : Union[str, Any] =[key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCamelCase__ : Any =get_keys_to_not_convert(__lowerCamelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__lowerCamelCase )
lowerCamelCase__ : Tuple =bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCamelCase__ : Optional[Any] =[]
lowerCamelCase__ : List[Any] =bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__lowerCamelCase )
# compatibility with peft
lowerCamelCase__ : List[str] =load_in_abit
lowerCamelCase__ : List[str] =load_in_abit
lowerCamelCase__ : Union[str, Any] =get_parameter_device(__lowerCamelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowerCamelCase__ : str =replace_with_bnb_layers(__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase )
# convert param to the right dtype
lowerCamelCase__ : Union[str, Any] =bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCamelCase__ : Optional[int] =name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCamelCase__ : Dict =getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__lowerCamelCase ):
param.to(__lowerCamelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
lowerCamelCase__ : Dict =replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase )
lowerCamelCase__ : Optional[int] =get_quantized_model_device_map(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_memory=__lowerCamelCase , no_split_module_classes=__lowerCamelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCamelCase__ : List[str] =True
lowerCamelCase__ : Dict =any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowerCamelCase , offload_state_dict=__lowerCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__lowerCamelCase , device_map=__lowerCamelCase , offload_dir=__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
lowerCamelCase__ : List[Any] ={'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
lowerCamelCase__ : List[Any] ={}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCamelCase__ : int ={}
lowerCamelCase__ : Optional[int] =special_dtypes
lowerCamelCase__ : List[str] =no_split_module_classes
lowerCamelCase__ : Tuple =bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCamelCase__ : List[str] =get_balanced_memory(
__lowerCamelCase , low_zero=(device_map == '''balanced_low_0''') , max_memory=__lowerCamelCase , **__lowerCamelCase , )
lowerCamelCase__ : str =max_memory
lowerCamelCase__ : Any =infer_auto_device_map(__lowerCamelCase , **__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
# check if don't have any quantized module on the cpu
lowerCamelCase__ : List[str] =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCamelCase__ : List[str] ={
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                    '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=None ):
"""simple docstring"""
if modules_to_not_convert is None:
lowerCamelCase__ : Dict =[]
lowerCamelCase__ , lowerCamelCase__ : List[Any] =_replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not has_been_replaced:
logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers.'''
            ''' Please double check your model architecture, or submit an issue on GitHub if you think this is'''
            ''' a bug.''' )
return model
def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int=None , __lowerCamelCase : Optional[Any]=None , ):
"""simple docstring"""
lowerCamelCase__ : Tuple =False
for name, module in model.named_children():
if current_key_name is None:
lowerCamelCase__ : Optional[Any] =[]
current_key_name.append(__lowerCamelCase )
if isinstance(__lowerCamelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCamelCase__ : Optional[Any] ='''.'''.join(__lowerCamelCase )
lowerCamelCase__ : Tuple =True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCamelCase__ : Any =False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCamelCase__ : List[str] =bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__lowerCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCamelCase__ : str =bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
                raise ValueError('''load_in_8bit and load_in_4bit can\'t both be False''' )
lowerCamelCase__ : Any =module.weight.data
if module.bias is not None:
lowerCamelCase__ : Any =module.bias.data
bnb_module.requires_grad_(__lowerCamelCase )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : str =True
if len(list(module.children() ) ) > 0:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =_replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Any =has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def snake_case__ ( __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
# Create a copy of the model
with init_empty_weights():
        lowerCamelCase__ : Optional[Any] =deepcopy(__lowerCamelCase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
lowerCamelCase__ : Union[str, Any] =find_tied_parameters(__lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : List[str] =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCamelCase__ : Any =sum(__lowerCamelCase , [] )
lowerCamelCase__ : Any =len(__lowerCamelCase ) > 0
# Check if it is a base model
lowerCamelCase__ : Optional[Any] =False
if hasattr(__lowerCamelCase , '''base_model_prefix''' ):
lowerCamelCase__ : Dict =not hasattr(__lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCamelCase__ : List[str] =list(model.named_children() )
lowerCamelCase__ : Any =[list_modules[-1][0]]
# add last module together with tied weights
lowerCamelCase__ : Optional[Any] =set(__lowerCamelCase ) - set(__lowerCamelCase )
lowerCamelCase__ : List[str] =list(set(__lowerCamelCase ) ) + list(__lowerCamelCase )
# remove ".weight" from the keys
lowerCamelCase__ : Optional[Any] =['''.weight''', '''.bias''']
lowerCamelCase__ : List[Any] =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCamelCase__ : Union[str, Any] =name.replace(__lowerCamelCase , '''''' )
filtered_module_names.append(__lowerCamelCase )
return filtered_module_names
def snake_case__ ( __lowerCamelCase : Tuple ):
"""simple docstring"""
for m in model.modules():
if isinstance(__lowerCamelCase , bnb.nn.Linearabit ):
return True
return False
def snake_case__ ( __lowerCamelCase : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , 0 , dtype=__lowerCamelCase , value=__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =param_name
lowerCamelCase__ : Dict =model
if "." in tensor_name:
lowerCamelCase__ : Optional[int] =tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCamelCase__ : Union[str, Any] =getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
lowerCamelCase__ : Union[str, Any] =new_module
lowerCamelCase__ : List[Any] =splits[-1]
# offload weights
lowerCamelCase__ : Optional[Any] =False
offload_weight(module._parameters[tensor_name] , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase , )
else:
offload_weight(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
offload_weight(__lowerCamelCase , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase )
set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , '''meta''' , dtype=__lowerCamelCase , value=torch.empty(*param.size() ) )
| 272 | 0 |
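# A minimal standalone sketch of the module-swapping pattern that _replace_with_bnb_layers
# implements above: walk named_children recursively, honour a skip list, and setattr a
# replacement module in place. Quantized8bitLinear is a hypothetical stand-in for
# bnb.nn.Linear8bitLt used only for illustration; it is not a bitsandbytes class.
import torch.nn as nn

class Quantized8bitLinear(nn.Linear):
    """Placeholder for a quantized linear layer (assumption for illustration)."""

def swap_linears(module: nn.Module, skip: list, prefix: str = "") -> None:
    for name, child in module.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and full_name not in skip:
            replacement = Quantized8bitLinear(child.in_features, child.out_features, child.bias is not None)
            replacement.weight.data = child.weight.data  # copy weights over, as the code above does
            if child.bias is not None:
                replacement.bias.data = child.bias.data
            setattr(module, name, replacement)
        else:
            swap_linears(child, skip, full_name)

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
swap_linears(model, skip=[])
print(model)  # both Linear layers are now Quantized8bitLinear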
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
_a = logging.get_logger(__name__)
_a = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
_a = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
_a = {
'''RUCAIBox/mvp''': 1_0_2_4,
}
class A_ ( snake_case__ ):
_lowercase : Any = VOCAB_FILES_NAMES
_lowercase : int = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : List[Any] = ['input_ids', 'attention_mask']
_lowercase : Tuple = MvpTokenizer
def __init__( self : Optional[Any] , UpperCAmelCase : List[str]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Any="replace" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : Dict="</s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : List[Any]="<unk>" , UpperCAmelCase : Union[str, Any]="<pad>" , UpperCAmelCase : List[Any]="<mask>" , UpperCAmelCase : Dict=False , UpperCAmelCase : Tuple=True , **UpperCAmelCase : Tuple , ) -> List[Any]:
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
__lowerCAmelCase: Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase ) != add_prefix_space:
__lowerCAmelCase: str = getattr(UpperCAmelCase , pre_tok_state.pop('type' ) )
__lowerCAmelCase: int = add_prefix_space
__lowerCAmelCase: Union[str, Any] = pre_tok_class(**UpperCAmelCase )
__lowerCAmelCase: Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__lowerCAmelCase: Dict = 'post_processor'
__lowerCAmelCase: List[Any] = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
__lowerCAmelCase: Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__lowerCAmelCase: int = tuple(state['sep'] )
if "cls" in state:
__lowerCAmelCase: str = tuple(state['cls'] )
__lowerCAmelCase: List[Any] = False
if state.get('add_prefix_space' , UpperCAmelCase ) != add_prefix_space:
__lowerCAmelCase: str = add_prefix_space
__lowerCAmelCase: Tuple = True
if state.get('trim_offsets' , UpperCAmelCase ) != trim_offsets:
__lowerCAmelCase: Optional[Any] = trim_offsets
__lowerCAmelCase: List[str] = True
if changes_to_apply:
__lowerCAmelCase: Union[str, Any] = getattr(UpperCAmelCase , state.pop('type' ) )
__lowerCAmelCase: Optional[int] = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def UpperCAmelCase ( self : int ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase ( self : Dict , UpperCAmelCase : List[str] ) -> Any:
__lowerCAmelCase: List[str] = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
__lowerCAmelCase: str = value
def UpperCAmelCase ( self : Optional[Any] , *UpperCAmelCase : Any , **UpperCAmelCase : int ) -> BatchEncoding:
__lowerCAmelCase: List[Any] = kwargs.get('is_split_into_words' , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : Dict , *UpperCAmelCase : int , **UpperCAmelCase : Any ) -> BatchEncoding:
__lowerCAmelCase: Any = kwargs.get('is_split_into_words' , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
__lowerCAmelCase: str = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int]=None ) -> Optional[int]:
__lowerCAmelCase: List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase: Tuple = [self.sep_token_id]
__lowerCAmelCase: Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 322 |
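# A minimal usage sketch for the fast MVP tokenizer defined above. The checkpoint name
# comes from PRETRAINED_VOCAB_FILES_MAP; the sample text and keyword arguments are
# illustrative assumptions, not part of the original file.
from transformers import MvpTokenizerFast

tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
batch = tokenizer(
    ["Summarize: MVP is a supervised pre-trained model for text generation."],
    padding=True,
    truncation=True,
    return_tensors="pt",
)
print(batch["input_ids"].shape)  # (batch_size, sequence_length)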
def _a ( SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: List[Any] = f'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 0:
return False
__lowerCAmelCase: str = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322 | 1 |
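# The function above is an automorphic-number test: it returns True when the square of a
# non-negative integer ends in the integer itself (5 -> 25, 76 -> 5776). A sketch of the
# same digit-by-digit comparison with descriptive names (is_automorphic is our own name,
# not from the original file):
def is_automorphic(number: int) -> bool:
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

assert is_automorphic(76)  # 76 * 76 = 5776 ends in 76
assert not is_automorphic(7)  # 7 * 7 = 49 does not end in 7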
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _lowercase , _lowercase=13 , _lowercase=30 , _lowercase=2 , _lowercase=3 , _lowercase=True , _lowercase=True , _lowercase=32 , _lowercase=2 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=10 , _lowercase=0.02 , _lowercase=3 , _lowercase=None , _lowercase=2 , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scope
_lowerCAmelCase = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCAmelCase = (image_size // patch_size) ** 2
_lowerCAmelCase = num_patches + 2
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFDeiTModel(config=UpperCamelCase__ )
_lowerCAmelCase = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFDeiTForMaskedImageModeling(config=UpperCamelCase__ )
_lowerCAmelCase = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = TFDeiTForMaskedImageModeling(UpperCamelCase__ )
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.type_sequence_label_size
_lowerCAmelCase = TFDeiTForImageClassification(UpperCamelCase__ )
_lowerCAmelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = TFDeiTForImageClassification(UpperCamelCase__ )
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_lowercase : Any = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_lowercase : int = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : Optional[int] = False
_lowercase : Dict = False
_lowercase : List[str] = False
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFDeiTModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Dense ) )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(UpperCamelCase__ )
_lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def _lowercase ( self , _lowercase , _lowercase , _lowercase=False ):
"""simple docstring"""
_lowerCAmelCase = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFDeiTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def A ():
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors="""tf""" )
# forward pass
_lowerCAmelCase = model(**UpperCamelCase__ )
# verify the logits
_lowerCAmelCase = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
_lowerCAmelCase = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 369 |
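# A minimal inference sketch mirroring the integration test above; the checkpoint name
# and image path are taken from the test, while the top-1 decoding at the end is an
# illustrative assumption.
import tensorflow as tf
from PIL import Image
from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher

processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits
print(int(tf.argmax(logits, axis=-1)[0]))  # predicted ImageNet class id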
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_lowercase = datasets.logging.get_logger(__name__)
_lowercase = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_lowercase = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_lowercase = """
Calculates coreference evaluation metrics.
Args:
    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
        Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)
        See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)
        See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
        mentions whose corresponding coreference chain is of size one
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def A (__lowerCamelCase :str , __lowerCamelCase :Optional[Any] , __lowerCamelCase :Union[str, Any]=False , __lowerCamelCase :List[Any]=False , __lowerCamelCase :str=True , __lowerCamelCase :str=False , __lowerCamelCase :str="dummy_doc" ):
_lowerCAmelCase = {doc: key_lines}
_lowerCAmelCase = {doc: sys_lines}
_lowerCAmelCase = {}
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase , _lowerCAmelCase = reader.get_doc_mentions(__lowerCamelCase , key_doc_lines[doc] , __lowerCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase = reader.set_annotated_parse_trees(__lowerCamelCase , key_doc_lines[doc] , __lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase = reader.get_doc_mentions(__lowerCamelCase , sys_doc_lines[doc] , __lowerCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase = reader.set_annotated_parse_trees(__lowerCamelCase , key_doc_lines[doc] , __lowerCamelCase , __lowerCamelCase )
if remove_nested:
_lowerCAmelCase , _lowerCAmelCase = reader.remove_nested_coref_mentions(__lowerCamelCase , __lowerCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_lowerCAmelCase , _lowerCAmelCase = reader.remove_nested_coref_mentions(__lowerCamelCase , __lowerCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_lowerCAmelCase = reader.get_mention_assignments(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = reader.get_mention_assignments(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
"""Number of resulting singleton clusters in the key """
f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
"""files, respectively""" )
return doc_coref_infos
def A (__lowerCamelCase :List[str] , __lowerCamelCase :str , __lowerCamelCase :str , __lowerCamelCase :int , __lowerCamelCase :int , __lowerCamelCase :Optional[Any] , __lowerCamelCase :Optional[Any] ):
_lowerCAmelCase = get_coref_infos(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = {}
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for name, metric in metrics:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = evaluator.evaluate_documents(__lowerCamelCase , __lowerCamelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} )
logger.info(
name.ljust(10 ) , f'Recall: {recall * 100:.2f}' , f' Precision: {precision * 100:.2f}' , f' F1: {fa * 100:.2f}' , )
if conll_subparts_num == 3:
_lowerCAmelCase = (conll / 3) * 100
logger.info(f'CoNLL score: {conll:.2f}' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def A (__lowerCamelCase :List[str] ):
_lowerCAmelCase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
_lowerCAmelCase = line.split()[5]
if not parse_col == "-":
_lowerCAmelCase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def _lowercase ( self , _lowercase , _lowercase , _lowercase=True , _lowercase=False , _lowercase=False , _lowercase=False ):
"""simple docstring"""
_lowerCAmelCase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_lowerCAmelCase = util.check_gold_parse_annotation(_lowercase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_lowerCAmelCase = evaluate(
key_lines=_lowercase , sys_lines=_lowercase , metrics=_lowercase , NP_only=_lowercase , remove_nested=_lowercase , keep_singletons=_lowercase , min_span=_lowercase , )
return score
| 229 | 0 |
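# The averaged CoNLL score computed above is simply the mean of the MUC, B-cubed and
# CEAFe F1 values scaled to a percentage. A tiny sketch with made-up F1 values to make
# the arithmetic concrete:
f1_scores = {"muc": 0.80, "bcub": 0.70, "ceafe": 0.75}
conll_score = sum(f1_scores.values()) / 3 * 100
print(f"CoNLL score: {conll_score:.2f}")  # CoNLL score: 75.00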
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
A__ : str = logging.get_logger(__name__)
A__ : Any = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : str = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
A__ : Union[str, Any] = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
A__ : Dict = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class __snake_case ( UpperCamelCase_ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_INIT_CONFIGURATION
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = RealmTokenizer
def __init__( self : int , A_ : Optional[int]=None , A_ : Optional[Any]=None , A_ : Optional[Any]=True , A_ : Optional[int]="[UNK]" , A_ : List[Any]="[SEP]" , A_ : List[Any]="[PAD]" , A_ : Optional[Any]="[CLS]" , A_ : Dict="[MASK]" , A_ : List[Any]=True , A_ : List[str]=None , **A_ : List[str] , ):
super().__init__(
A_ , tokenizer_file=A_ , do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , tokenize_chinese_chars=A_ , strip_accents=A_ , **A_ , )
lowerCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('''lowercase''' , A_) != do_lower_case
or normalizer_state.get('''strip_accents''' , A_) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , A_) != tokenize_chinese_chars
):
lowerCAmelCase_ : int = getattr(A_ , normalizer_state.pop('''type'''))
lowerCAmelCase_ : str = do_lower_case
lowerCAmelCase_ : Dict = strip_accents
lowerCAmelCase_ : Optional[Any] = tokenize_chinese_chars
lowerCAmelCase_ : Union[str, Any] = normalizer_class(**A_)
lowerCAmelCase_ : Any = do_lower_case
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Optional[Any] , **A_ : Tuple):
lowerCAmelCase_ : List[str] = PaddingStrategy.MAX_LENGTH
lowerCAmelCase_ : str = text
lowerCAmelCase_ : int = kwargs.pop('''text_pair''' , A_)
lowerCAmelCase_ : str = kwargs.pop('''return_tensors''' , A_)
lowerCAmelCase_ : int = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(A_):
if batch_text_pair is not None:
lowerCAmelCase_ : List[Any] = batch_text_pair[idx]
else:
lowerCAmelCase_ : List[Any] = None
lowerCAmelCase_ : int = super().__call__(A_ , A_ , return_tensors=A_ , **A_)
lowerCAmelCase_ : Optional[Any] = encoded_candidates.get('''input_ids''')
lowerCAmelCase_ : List[str] = encoded_candidates.get('''attention_mask''')
lowerCAmelCase_ : Optional[Any] = encoded_candidates.get('''token_type_ids''')
if encoded_input_ids is not None:
output_data["input_ids"].append(A_)
if encoded_attention_mask is not None:
output_data["attention_mask"].append(A_)
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(A_)
lowerCAmelCase_ : List[str] = {key: item for key, item in output_data.items() if len(A_) != 0}
return BatchEncoding(A_ , tensor_type=A_)
def UpperCAmelCase__ ( self : List[str] , A_ : Tuple , A_ : List[Any]=None):
lowerCAmelCase_ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None):
lowerCAmelCase_ : Tuple = [self.sep_token_id]
lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCAmelCase__ ( self : List[str] , A_ : str , A_ : Optional[str] = None):
lowerCAmelCase_ : List[str] = self._tokenizer.model.save(A_ , name=A_)
return tuple(A_)
| 103 |
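# A usage sketch for the candidate-batching method defined above (released as
# batch_encode_candidates on RealmTokenizerFast): every candidate is padded to
# max_length so the batch can hold several candidate texts per example. The toy
# inputs are assumptions for illustration.
from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
batch_text = [
    ["Hello world!", "Nice to meet you!"],
    ["The cute cat.", "The adorable dog."],
]
batch = tokenizer.batch_encode_candidates(batch_text, max_length=10, return_tensors="pt")
print(batch["input_ids"].shape)  # torch.Size([2, 2, 10])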
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ : List[Any] = logging.get_logger(__name__)
def UpperCamelCase( __UpperCamelCase : List[str] ):
lowerCAmelCase_ : Any = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
lowerCAmelCase_ : Any = 1024
lowerCAmelCase_ : Tuple = 4096
lowerCAmelCase_ : List[Any] = 24
lowerCAmelCase_ : int = 16
lowerCAmelCase_ : Tuple = [5, 11, 17, 23]
lowerCAmelCase_ : Optional[Any] = [256, 512, 1024, 1024]
lowerCAmelCase_ : str = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCAmelCase_ : Union[str, Any] = 768
lowerCAmelCase_ : int = [1, 1, 1, 0.5]
lowerCAmelCase_ : List[str] = [256, 512, 768, 768]
lowerCAmelCase_ : int = 150
lowerCAmelCase_ : Any = 16
lowerCAmelCase_ : Any = (1, 384, 384)
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Union[str, Any] = '''project'''
if "ade" in checkpoint_url:
lowerCAmelCase_ : Any = True
lowerCAmelCase_ : Any = 768
lowerCAmelCase_ : Optional[int] = [1, 1, 1, 0.5]
lowerCAmelCase_ : Union[str, Any] = 150
lowerCAmelCase_ : Any = 16
lowerCAmelCase_ : Any = '''huggingface/label-files'''
lowerCAmelCase_ : Any = '''ade20k-id2label.json'''
lowerCAmelCase_ : str = json.load(open(cached_download(hf_hub_url(__UpperCamelCase ,__UpperCamelCase ,repo_type='''dataset''' ) ) ,'''r''' ) )
lowerCAmelCase_ : Optional[Any] = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[Any] = idalabel
lowerCAmelCase_ : Optional[int] = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ : List[Any] = [1, 150, 480, 480]
return config, expected_shape
def UpperCamelCase( __UpperCamelCase : Optional[int] ):
lowerCAmelCase_ : int = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def UpperCamelCase( __UpperCamelCase : int ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCAmelCase_ : Dict = name.replace('''pretrained.model''' ,'''dpt.encoder''' )
if "pretrained.model" in name:
lowerCAmelCase_ : List[Any] = name.replace('''pretrained.model''' ,'''dpt.embeddings''' )
if "patch_embed" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''patch_embed''' ,'''''' )
if "pos_embed" in name:
lowerCAmelCase_ : Dict = name.replace('''pos_embed''' ,'''position_embeddings''' )
if "attn.proj" in name:
lowerCAmelCase_ : Any = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "proj" in name and "project" not in name:
lowerCAmelCase_ : Tuple = name.replace('''proj''' ,'''projection''' )
if "blocks" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''blocks''' ,'''layer''' )
if "mlp.fc1" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "norm1" in name and "backbone" not in name:
lowerCAmelCase_ : List[str] = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''norm2''' ,'''layernorm_after''' )
if "scratch.output_conv" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''scratch.output_conv''' ,'''head''' )
if "scratch" in name:
lowerCAmelCase_ : Dict = name.replace('''scratch''' ,'''neck''' )
if "layer1_rn" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''layer1_rn''' ,'''convs.0''' )
if "layer2_rn" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('''layer2_rn''' ,'''convs.1''' )
if "layer3_rn" in name:
lowerCAmelCase_ : List[Any] = name.replace('''layer3_rn''' ,'''convs.2''' )
if "layer4_rn" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''layer4_rn''' ,'''convs.3''' )
if "refinenet" in name:
lowerCAmelCase_ : List[str] = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCAmelCase_ : Dict = name.replace(f"""refinenet{layer_idx}""" ,f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
lowerCAmelCase_ : int = name.replace('''out_conv''' ,'''projection''' )
if "resConfUnit1" in name:
lowerCAmelCase_ : Dict = name.replace('''resConfUnit1''' ,'''residual_layer1''' )
if "resConfUnit2" in name:
lowerCAmelCase_ : str = name.replace('''resConfUnit2''' ,'''residual_layer2''' )
if "conv1" in name:
lowerCAmelCase_ : str = name.replace('''conv1''' ,'''convolution1''' )
if "conv2" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''conv2''' ,'''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCAmelCase_ : Dict = name.replace('''pretrained.act_postprocess1.0.project.0''' ,'''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''pretrained.act_postprocess2.0.project.0''' ,'''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCAmelCase_ : Tuple = name.replace('''pretrained.act_postprocess3.0.project.0''' ,'''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCAmelCase_ : Dict = name.replace('''pretrained.act_postprocess4.0.project.0''' ,'''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCAmelCase_ : List[Any] = name.replace('''pretrained.act_postprocess1.3''' ,'''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
lowerCAmelCase_ : Dict = name.replace('''pretrained.act_postprocess1.4''' ,'''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
lowerCAmelCase_ : List[Any] = name.replace('''pretrained.act_postprocess2.3''' ,'''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
lowerCAmelCase_ : List[str] = name.replace('''pretrained.act_postprocess2.4''' ,'''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''pretrained.act_postprocess3.3''' ,'''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
lowerCAmelCase_ : List[str] = name.replace('''pretrained.act_postprocess4.3''' ,'''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''pretrained.act_postprocess4.4''' ,'''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
lowerCAmelCase_ : Tuple = name.replace('''pretrained''' ,'''dpt''' )
if "bn" in name:
lowerCAmelCase_ : Dict = name.replace('''bn''' ,'''batch_norm''' )
if "head" in name:
lowerCAmelCase_ : Any = name.replace('''head''' ,'''head.head''' )
if "encoder.norm" in name:
lowerCAmelCase_ : Tuple = name.replace('''encoder.norm''' ,'''layernorm''' )
if "auxlayer" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''auxlayer''' ,'''auxiliary_head.head''' )
if "backbone" in name:
lowerCAmelCase_ : List[Any] = name.replace('''backbone''' ,'''backbone.bit.encoder''' )
if ".." in name:
lowerCAmelCase_ : List[Any] = name.replace('''..''' ,'''.''' )
if "stem.conv" in name:
lowerCAmelCase_ : str = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
if "blocks" in name:
lowerCAmelCase_ : List[str] = name.replace('''blocks''' ,'''layers''' )
if "convolution" in name and "backbone" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''convolution''' ,'''conv''' )
if "layer" in name and "backbone" in name:
lowerCAmelCase_ : Optional[int] = name.replace('''layer''' ,'''layers''' )
if "backbone.bit.encoder.bit" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('''backbone.bit.encoder.bit''' ,'''backbone.bit''' )
if "embedder.conv" in name:
lowerCAmelCase_ : str = name.replace('''embedder.conv''' ,'''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
lowerCAmelCase_ : Dict = name.replace('''backbone.bit.encoder.stem.norm''' ,'''backbone.bit.embedder.norm''' )
return name
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ : Dict = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
lowerCAmelCase_ : Tuple = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ : str = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase_ : str = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase_ : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase_ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase_ : Dict = in_proj_bias[-config.hidden_size :]
def UpperCamelCase( ):
lowerCAmelCase_ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase_ : Dict = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : str ):
lowerCAmelCase_ , lowerCAmelCase_ : Any = get_dpt_config(__UpperCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCAmelCase_ : List[str] = torch.load(__UpperCamelCase ,map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(__UpperCamelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCAmelCase_ : Any = state_dict.pop(__UpperCamelCase )
lowerCAmelCase_ : Optional[Any] = val
# read in qkv matrices
read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase )
# load HuggingFace model
lowerCAmelCase_ : List[Any] = DPTForSemanticSegmentation(__UpperCamelCase ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Check outputs on an image
lowerCAmelCase_ : Tuple = 480 if '''ade''' in checkpoint_url else 384
lowerCAmelCase_ : Optional[int] = DPTImageProcessor(size=__UpperCamelCase )
lowerCAmelCase_ : Union[str, Any] = prepare_img()
lowerCAmelCase_ : str = image_processor(__UpperCamelCase ,return_tensors='''pt''' )
# forward pass
lowerCAmelCase_ : Tuple = model(**__UpperCamelCase ).logits if '''ade''' in checkpoint_url else model(**__UpperCamelCase ).predicted_depth
if show_prediction:
lowerCAmelCase_ : Optional[Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) ,size=(image.size[1], image.size[0]) ,mode='''bicubic''' ,align_corners=__UpperCamelCase ,)
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 103 | 1 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
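# Explanatory note (added): with H = sum(ord(s[k]) * alphabet_size**(p_len - 1 - k)) mod modulus
# over a window s and modulus_power = alphabet_size**(p_len - 1) mod modulus, sliding the window
# one character to the right costs O(1):
#   H' = ((H - ord(old_char) * modulus_power) * alphabet_size + ord(new_char)) mod modulus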
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 4 |
'''simple docstring'''
import pprint
import requests
snake_case__ = """https://zenquotes.io/api"""
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def snake_case__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
snake_case__ = random_quotes()
pprint.pprint(response)
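    # The API typically returns a list of dicts such as
    # [{"q": "<quote>", "a": "<author>", "h": "<html>"}]; these field names are an
    # assumption here -- verify against the live response before relying on them.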
| 4 | 1 |
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            # fill the middle digit of an odd-length number
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        # fill the outermost unfilled pair: digit1 on the right, digit2 on the left
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result
def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
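# Sanity check from the Project Euler 145 statement: there are exactly 120 reversible
# numbers below one thousand, so solution(3) should return 120.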
if __name__ == "__main__":
print(F'''{solution() = }''')
| 256 | """simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : int ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) -> str:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : int , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : int , *__UpperCamelCase : Dict , **__UpperCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : str , *__UpperCamelCase : Dict , **__UpperCamelCase : List[Any] ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Any , **__UpperCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Tuple ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : int ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Dict ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : int , **__UpperCamelCase : int ) -> Any:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Any ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ) -> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : int ) -> Tuple:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Tuple , *__UpperCamelCase : Any , **__UpperCamelCase : List[Any] ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : int , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Dict ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : str , *__UpperCamelCase : int , **__UpperCamelCase : Optional[int] ) -> Any:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Tuple , *__UpperCamelCase : str , **__UpperCamelCase : Any ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[str] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Tuple , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Union[str, Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Any ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : str , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : str ) -> Tuple:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[str] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Optional[Any] ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : int , **__UpperCamelCase : Optional[Any] ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[int] , *__UpperCamelCase : List[str] , **__UpperCamelCase : str ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Optional[int] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Tuple ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[int] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Any , **__UpperCamelCase : Union[str, Any] ) -> Any:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Union[str, Any] , *__UpperCamelCase : Any , **__UpperCamelCase : Optional[Any] ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Union[str, Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : str ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Dict ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : str , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Union[str, Any] ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Any ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : str , **__UpperCamelCase : Tuple ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Dict , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Dict , **__UpperCamelCase : List[str] ) -> Dict:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
| 256 | 1 |
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
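# The test decides primality of the Mersenne number M_p = 2**p - 1 for an odd prime p:
# e.g. 2**7 - 1 = 127 is prime (True below), while 2**11 - 1 = 2047 = 23 * 89 is not.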
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 140 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria(ABC):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
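# Usage sketch (added for illustration; `model` and `input_ids` stand for any
# HuggingFace generation setup):
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
#   outputs = model.generate(input_ids, stopping_criteria=criteria)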
| 140 | 1 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 162 | 0 |
"""simple docstring"""
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # swap the out-of-order pair and step back
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
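    # Note (added): gnome sort sorts in place in O(n^2) worst case,
    # e.g. gnome_sort([5, 3, 4, 1]) == [1, 3, 4, 5].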
| 354 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=False , _a=True , _a="None" , _a=3 , _a=4 , _a=None , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : str = seq_length
SCREAMING_SNAKE_CASE__ : str = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : str = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Tuple = use_labels
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = num_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = num_choices
SCREAMING_SNAKE_CASE__ : List[str] = relative_attention
SCREAMING_SNAKE_CASE__ : str = position_biased_input
SCREAMING_SNAKE_CASE__ : List[str] = pos_att_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scope
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self ) -> Tuple:
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
SCREAMING_SNAKE_CASE__ : Any = 300
return config
def _a ( self , _a ) -> List[str]:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = DebertaModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(_a , attention_mask=_a , token_type_ids=_a )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , token_type_ids=_a )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = DebertaForMaskedLM(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Tuple = DebertaForSequenceClassification(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_a )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = DebertaForTokenClassification(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = DebertaForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[str] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE :str = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE :Union[str, Any] = True
_SCREAMING_SNAKE_CASE :str = False
_SCREAMING_SNAKE_CASE :Dict = False
_SCREAMING_SNAKE_CASE :Dict = False
_SCREAMING_SNAKE_CASE :Union[str, Any] = False
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = DebertaModelTester(self )
SCREAMING_SNAKE_CASE__ : str = ConfigTester(self , config_class=_a , hidden_size=37 )
def _a ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_a )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_a )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_a )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_a )
@slow
def _a ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Dict = DebertaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def _a ( self ) -> Any:
"""simple docstring"""
pass
@slow
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a , attention_mask=_a )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 56 | 0 |
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
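# Quick check (added for illustration): 7 * 15 = 105 = 4 * 26 + 1, so
# find_mod_inverse(7, 26) == 15 and (7 * 15) % 26 == 1.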
| 69 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a = logging.get_logger(__name__)
a = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
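# Illustrative ONNX-export setup (added; the checkpoint id is one of the models listed
# in the archive map above, and constructing the Onnx config directly is an assumption):
#   config = DebertaV2Config.from_pretrained("microsoft/deberta-v2-xlarge")
#   onnx_config = DebertaV2OnnxConfig(config)
#   print(onnx_config.inputs)  # dynamic axes for input_ids / attention_mask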
| 155 | 0 |
"""simple docstring"""
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
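# Usage sketch (added for illustration):
#   linked_list = LinkedList()
#   for value in (1, 2, 3):
#       linked_list.insert(value)
#   print(linked_list)        # -> "1 2 3"
#   linked_list.delete_value(2)
#   print(2 in linked_list)   # -> False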
def snake_case ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 253 | 1 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : int = 8 ,__UpperCamelCase : int = 1024 ,__UpperCamelCase : str="val" ,__UpperCamelCase : Optional[int]=None ,__UpperCamelCase : Any=False ,__UpperCamelCase : List[str]="summarization" ,__UpperCamelCase : Tuple=None ,__UpperCamelCase : Union[str, Any]=1 ,__UpperCamelCase : Dict = None ,__UpperCamelCase : List[str]="" ,**__UpperCamelCase : Union[str, Any] ,):
"""simple docstring"""
A_ = str(__UpperCamelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" ,rank=__UpperCamelCase )
A_ = Path(__UpperCamelCase )
A_ = save_dir.joinpath(f'''rank_{local_rank}_output.json''' )
torch.cuda.set_device(__UpperCamelCase )
A_ = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase ).cuda()
if fpaa:
A_ = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__UpperCamelCase ,__UpperCamelCase ) # update config with task specific params
A_ = generate_kwargs.pop("num_beams" ,model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
A_ = num_return_sequences
A_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
if max_source_length is None:
A_ = tokenizer.model_max_length
if prefix is None:
A_ = prefix or getattr(model.config ,"prefix" ,"" ) or ""
A_ = SeqaSeqDataset(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,max_target_length=1024 ,type_path=__UpperCamelCase ,n_obs=__UpperCamelCase ,prefix=__UpperCamelCase ,**__UpperCamelCase ,)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
A_ = ds.make_sortish_sampler(__UpperCamelCase ,distributed=__UpperCamelCase ,add_extra_examples=__UpperCamelCase ,shuffle=__UpperCamelCase )
A_ = DataLoader(__UpperCamelCase ,sampler=__UpperCamelCase ,batch_size=__UpperCamelCase ,collate_fn=ds.collate_fn )
A_ = []
for batch in tqdm(__UpperCamelCase ):
A_ = model.generate(
input_ids=batch["input_ids"].to(model.device ) ,attention_mask=batch["attention_mask"].to(model.device ) ,num_return_sequences=__UpperCamelCase ,num_beams=__UpperCamelCase ,**__UpperCamelCase ,)
A_ = tokenizer.batch_decode(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase )
A_ = batch["ids"]
if num_return_sequences > 1:
A_ = chunks(__UpperCamelCase ,__UpperCamelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__UpperCamelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(__UpperCamelCase ,__UpperCamelCase )
return results, sampler.num_replicas
def __snake_case ( ):
"""simple docstring"""
A_ = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" ,type=__UpperCamelCase ,help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" ,type=__UpperCamelCase ,help="like facebook/bart-large-cnn,t5-base, etc." ,default="sshleifer/distilbart-xsum-12-3" ,)
parser.add_argument("--save_dir" ,type=__UpperCamelCase ,help="where to save" ,default="tmp_gen" )
parser.add_argument("--max_source_length" ,type=__UpperCamelCase ,default=__UpperCamelCase )
parser.add_argument(
"--type_path" ,type=__UpperCamelCase ,default="test" ,help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" ,type=__UpperCamelCase ,default="summarization" ,help="used for task_specific_params + metrics" )
parser.add_argument("--bs" ,type=__UpperCamelCase ,default=8 ,required=__UpperCamelCase ,help="batch size" )
parser.add_argument(
"--local_rank" ,type=__UpperCamelCase ,default=-1 ,required=__UpperCamelCase ,help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" ,type=__UpperCamelCase ,default=__UpperCamelCase ,required=__UpperCamelCase ,help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" ,type=__UpperCamelCase ,default=1 ,required=__UpperCamelCase ,help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" ,type=__UpperCamelCase ,default=600 ,required=__UpperCamelCase ,help="How long should master process wait for other processes to finish." ,)
parser.add_argument("--src_lang" ,type=__UpperCamelCase ,default=__UpperCamelCase ,required=__UpperCamelCase )
parser.add_argument("--tgt_lang" ,type=__UpperCamelCase ,default=__UpperCamelCase ,required=__UpperCamelCase )
parser.add_argument(
"--prefix" ,type=__UpperCamelCase ,required=__UpperCamelCase ,default=__UpperCamelCase ,help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" ,action="store_true" )
parser.add_argument("--debug" ,action="store_true" )
A_ = time.time()
A_ , A_ = parser.parse_known_args()
A_ = parse_numeric_n_bool_cl_kwargs(__UpperCamelCase )
if generate_kwargs and args.local_rank <= 0:
print(f'''parsed the following generate kwargs: {generate_kwargs}''' )
A_ = Path(args.save_dir + "_tmp" )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) # this handles locking.
A_ = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
A_ = {}
if args.src_lang is not None:
A_ = args.src_lang
if args.tgt_lang is not None:
A_ = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__UpperCamelCase )
A_ , A_ = eval_data_dir(
args.data_dir ,__UpperCamelCase ,args.model_name ,type_path=args.type_path ,bs=args.bs ,fpaa=args.fpaa ,task=args.task ,local_rank=args.local_rank ,n_obs=args.n_obs ,max_source_length=args.max_source_length ,num_return_sequences=args.num_return_sequences ,prefix=args.prefix ,dataset_kwargs=__UpperCamelCase ,**__UpperCamelCase ,)
if args.local_rank <= 0:
A_ = Path(args.save_dir )
save_dir.mkdir(exist_ok=__UpperCamelCase )
A_ = gather_results_from_each_node(__UpperCamelCase ,__UpperCamelCase ,args.sync_timeout )
A_ = combine_partial_results(__UpperCamelCase )
if args.num_return_sequences > 1:
A_ = save_dir.joinpath("pseudolabel_results.json" )
print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
save_json(__UpperCamelCase ,__UpperCamelCase )
return
A_ = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(__UpperCamelCase ) as f:
A_ = [x.rstrip() for x in f.readlines()][: len(__UpperCamelCase )]
# Calculate metrics, save metrics, and save _generations.txt
A_ = "translation" in args.task
A_ = calculate_bleu if calc_bleu else calculate_rouge
A_ = "bleu" if calc_bleu else "rouge"
A_ = score_fn(__UpperCamelCase ,__UpperCamelCase )
A_ = len(__UpperCamelCase )
A_ = time.time() - start_time
A_ = round(runtime / metrics["n_obs"] ,4 )
A_ = num_replicas
# TODO(@stas00): add whatever metadata to metrics
A_ = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' )
save_json(__UpperCamelCase ,__UpperCamelCase ,indent=__UpperCamelCase )
print(__UpperCamelCase )
write_txt_file(__UpperCamelCase ,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) )
if args.debug:
write_txt_file(__UpperCamelCase ,save_dir.joinpath(f'''{args.type_path}.target''' ) )
else:
shutil.rmtree(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = []
for partial_result in partial_results:
records.extend(__UpperCamelCase )
A_ = sorted(__UpperCamelCase ,key=lambda __UpperCamelCase : x["id"] )
A_ = [x["pred"] for x in records]
return preds
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = time.time()
logger.info("waiting for all nodes to finish" )
A_ = None
while (time.time() - start_wait) < timeout:
A_ = list(save_dir.glob("rank_*.json" ) )
if len(__UpperCamelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
A_ = lmap(__UpperCamelCase ,__UpperCamelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
    run_generate()
| 312 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__a :Union[str, Any] = HfArgumentParser((TrainingArguments,))
__a :Tuple = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
__a :int = DummyDataset(dataset_length)
def __snake_case ( __UpperCamelCase : EvalPrediction ):
"""simple docstring"""
A_ = list(range(len(__UpperCamelCase ) ) )
A_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
__a :str = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__a :str = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__a :str = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__a :Optional[int] = 2
__a :List[Any] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = None
| 312 | 1 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Union[str, Any] ,lowercase_ : Any ,lowercase_ : List[Any] ):
self.assertEqual(len(lowercase_ ) ,len(lowercase_ ) )
for a, b in zip(lowercase_ ,lowercase_ ):
self.assertAlmostEqual(lowercase_ ,lowercase_ ,delta=lowercase_ )
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : Any = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(lowercase_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step ,3 )
self.assertEqual(len(accumulator.gradients ) ,1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[-2.0, 5.0] ,tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step ,0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[0.0, 0.0] ,tol=1E-2 )
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : str = None
ops.enable_eager_execution_internal()
lowerCAmelCase__ : Optional[Any] = tf.config.list_physical_devices('''CPU''' )
if len(lowercase_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] ,[tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCAmelCase__ : str = tf.config.list_logical_devices(device_type='''CPU''' )
lowerCAmelCase__ : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCAmelCase__ : List[Any] = GradientAccumulator()
lowerCAmelCase__ : str = tf.Variable([4.0, 3.0] )
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = create_optimizer(5E-5 ,1_0 ,5 )
lowerCAmelCase__ : str = tf.Variable([0.0, 0.0] ,trainable=lowercase_ )
def accumulate_on_replica(lowercase_ : Dict ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients ,[variable] ) ) )
@tf.function
def accumulate(lowercase_ : int ,lowercase_ : str ):
with strategy.scope():
lowerCAmelCase__ : Dict = strategy.experimental_local_results(lowercase_ )
local_variables[0].assign(lowercase_ )
local_variables[1].assign(lowercase_ )
strategy.run(lowercase_ ,args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(lowercase_ )
def _check_local_values(lowercase_ : Union[str, Any] ,lowercase_ : Optional[int] ):
lowerCAmelCase__ : Any = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() ,lowercase_ ,tol=1E-2 )
self.assertListAlmostEqual(values[1].value() ,lowercase_ ,tol=1E-2 )
accumulate([1.0, 2.0] ,[-1.0, 1.0] )
accumulate([3.0, -1.0] ,[-1.0, -1.0] )
accumulate([-2.0, 2.0] ,[3.0, -2.0] )
self.assertEqual(accumulator.step ,3 )
_check_local_values([2.0, 3.0] ,[1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() ,[4.0, 3.0] ,tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step ,0 )
_check_local_values([0.0, 0.0] ,[0.0, 0.0] )
| 74 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
url = F'''https://www.google.com/search?q={query}&num=100'''
res = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
link = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
link = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 74 | 1 |
'''simple docstring'''
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, volume: float, moles: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
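# Illustrative sanity check (an addition, not part of the original module): with the
# ideal-gas constant R ~ 0.0821 L*atm/(mol*K), 2 mol of gas at 300 K in a 2 L vessel
# exerts round(2 * 0.0821 * 300 / 2) == 25 atm, and 1 mol in 2 L with n-factor 2
# has normality round(0.5 * 2) == 1.
if __name__ == "__main__":
    assert moles_to_pressure(volume=2, moles=2, temperature=300) == 25
    assert molarity_to_normality(nfactor=2, moles=1, volume=2) == 1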
| 237 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__lowerCAmelCase : Dict =DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__lowerCAmelCase : List[Any] ="main"
# Default branch name
__lowerCAmelCase : int ="f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
__lowerCAmelCase : List[Any] ="aaaaaaa"
# This commit does not exist, so we should 404.
__lowerCAmelCase : Optional[int] ="d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
__lowerCAmelCase : Dict ="4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def UpperCamelCase ( ):
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def UpperCamelCase ( ):
print("Bonjour!" )
yield
print("Au revoir!" )
class UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self :str )-> List[str]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers" ) is not None
class UpperCAmelCase ( unittest.TestCase ):
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :Union[str, Any] )-> Any:
with ContextManagers([] ):
print("Transformers are awesome!" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCAmelCase_ ( self :Dict , lowercase_ :Optional[Any] )-> Tuple:
with ContextManagers([context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :int )-> Union[str, Any]:
with ContextManagers([context_fr(), context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )
@require_torch
def UpperCAmelCase_ ( self :int )-> Dict:
self.assertEqual(find_labels(lowercase_ ) , ["labels"] )
self.assertEqual(find_labels(lowercase_ ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(lowercase_ ) , ["start_positions", "end_positions"] )
class UpperCAmelCase ( UpperCamelCase__ ):
pass
self.assertEqual(find_labels(lowercase_ ) , ["labels"] )
@require_tf
def UpperCAmelCase_ ( self :Union[str, Any] )-> Union[str, Any]:
self.assertEqual(find_labels(lowercase_ ) , ["labels"] )
self.assertEqual(find_labels(lowercase_ ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(lowercase_ ) , ["start_positions", "end_positions"] )
class UpperCAmelCase ( UpperCamelCase__ ):
pass
self.assertEqual(find_labels(lowercase_ ) , ["labels"] )
@require_flax
def UpperCAmelCase_ ( self :Dict )-> str:
# Flax models don't have labels
self.assertEqual(find_labels(lowercase_ ) , [] )
self.assertEqual(find_labels(lowercase_ ) , [] )
self.assertEqual(find_labels(lowercase_ ) , [] )
class UpperCAmelCase ( UpperCamelCase__ ):
pass
self.assertEqual(find_labels(lowercase_ ) , [] )
| 237 | 1 |
def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0)
def main() -> None:
"""simple docstring"""
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
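# Side demo (an illustrative addition): NOR is functionally complete, so other gates
# can be rebuilt from it, e.g. NOT a == nor_gate(a, a) and
# OR(a, b) == nor_gate(nor_gate(a, b), nor_gate(a, b)).
if __name__ == "__main__":
    assert nor_gate(0, 0) == 1  # NOT 0 via NOR
    assert nor_gate(nor_gate(0, 1), nor_gate(0, 1)) == 1  # OR(0, 1) from NOR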
| 354 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __lowerCamelCase ( UpperCAmelCase_ : dict ):
"""simple docstring"""
return (data["data"], data["target"])
def __lowerCamelCase ( UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : np.ndarray ):
"""simple docstring"""
a :Optional[Any] = XGBClassifier()
classifier.fit(UpperCAmelCase_ , UpperCAmelCase_ )
return classifier
def __lowerCamelCase ( ):
"""simple docstring"""
a :List[Any] = load_iris()
a , a :Any = data_handling(UpperCAmelCase_ )
a , a , a , a :Tuple = train_test_split(
UpperCAmelCase_ , UpperCAmelCase_ , test_size=0.25 )
a :List[Any] = iris['''target_names''']
# Create an XGBoost Classifier from the training data
a :Optional[int] = xgboost(UpperCAmelCase_ , UpperCAmelCase_ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , display_labels=UpperCAmelCase_ , cmap='''Blues''' , normalize='''true''' , )
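    # With normalize="true", each row (true class) of the matrix is scaled to sum
    # to 1, so the diagonal cells read as per-class recall rather than raw counts.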
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 281 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class lowercase_ ( __snake_case ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = """markuplm"""
def __init__( self , __SCREAMING_SNAKE_CASE=30522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=1024 , __SCREAMING_SNAKE_CASE=216 , __SCREAMING_SNAKE_CASE=1001 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=50 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) ->Dict:
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
lowerCAmelCase = classifier_dropout
# additional properties
lowerCAmelCase = max_depth
lowerCAmelCase = max_xpath_tag_unit_embeddings
lowerCAmelCase = max_xpath_subs_unit_embeddings
lowerCAmelCase = tag_pad_id
lowerCAmelCase = subs_pad_id
lowerCAmelCase = xpath_unit_hidden_size
| 338 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowercase : Union[str, Any] = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path, enable_fusion=False):
    '''simple docstring'''
    model, model_cfg = create_model(
        '''HTSAT-tiny''' , '''roberta''' , checkpoint_path , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=enable_fusion , fusion_type='''aff_2d''' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict):
    '''simple docstring'''
    model_state_dict = {}
    sequential_layers_pattern = R'''.*sequential.(\d+).*'''
    text_projection_pattern = R'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(sequential_layer)//3}.linear.')
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.')
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace('''qkv''', '''query''')] = query_layer
            model_state_dict[key.replace('''qkv''', '''key''')] = key_layer
            model_state_dict[key.replace('''qkv''', '''value''')] = value_layer
        else:
            model_state_dict[key] = value
return model_state_dict
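# Illustrative shape note (an addition, with assumed sizes): the fused attention
# weight has first dimension 3 * hidden, so the three equal slices above recover
# the separate query/key/value projections, e.g. a (12, 8) tensor yields three
# (4, 8) chunks.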
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    '''simple docstring'''
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
lowercase : Tuple = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 3 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-m""" , """--pretrained_model_name_or_path""" , type=str , default=None , required=True , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
    parser.add_argument(
        """-c""" , """--caption""" , type=str , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
    parser.add_argument(
        """-n""" , """--images_num""" , type=int , default=4 , help="""How much images to generate.""" , )
    parser.add_argument(
        """-s""" , """--seed""" , type=int , default=42 , help="""Seed for random process.""" , )
    parser.add_argument(
        """-ci""" , """--cuda_id""" , type=int , default=0 , help="""cuda_id.""" , )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("""The specified number of rows and columns are not correct.""")
    w, h = imgs[0].size
    grid = Image.new("""RGB""" , size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img , box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, """unet""", unet)
else:
    unet = unet.to(torch.device("""cuda""", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
| 362 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__( self , _lowercase , _lowercase = None , _lowercase = None ):
"""simple docstring"""
super().__init__()
_lowerCAmelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
_lowerCAmelCase = torch.zeros(_lowercase , _lowercase )
else:
_lowerCAmelCase = None
_lowerCAmelCase = torch.nn.Parameter(_lowercase )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : VQModel
_lowercase : CLIPTextModel
_lowercase : CLIPTokenizer
_lowercase : TransformeraDModel
_lowercase : LearnedClassifierFreeSamplingEmbeddings
_lowercase : VQDiffusionScheduler
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=_lowercase , transformer=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = len(_lowercase ) if isinstance(_lowercase , _lowercase ) else 1
# get prompt text embeddings
_lowerCAmelCase = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
_lowerCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
_lowerCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_lowerCAmelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_lowercase )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase = prompt_embeds.repeat_interleave(_lowercase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_lowerCAmelCase = self.learned_classifier_free_sampling_embeddings.embeddings
_lowerCAmelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(_lowercase , 1 , 1 )
else:
_lowerCAmelCase = [""""""] * batch_size
_lowerCAmelCase = text_input_ids.shape[-1]
_lowerCAmelCase = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=_lowercase , truncation=_lowercase , return_tensors="""pt""" , )
_lowerCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_lowerCAmelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_lowercase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase = negative_prompt_embeds.shape[1]
_lowerCAmelCase = negative_prompt_embeds.repeat(1 , _lowercase , 1 )
_lowerCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , _lowercase , _lowercase = 100 , _lowercase = 5.0 , _lowercase = 1.0 , _lowercase = 1 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , ):
"""simple docstring"""
if isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = 1
elif isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = len(_lowercase )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(_lowercase )}' )
_lowerCAmelCase = batch_size * num_images_per_prompt
_lowerCAmelCase = guidance_scale > 1.0
_lowerCAmelCase = self._encode_prompt(_lowercase , _lowercase , _lowercase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(_lowercase )}.' )
# get the initial completely masked latents unless the user supplied it
_lowerCAmelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_lowerCAmelCase = self.transformer.num_vector_embeds - 1
_lowerCAmelCase = torch.full(_lowercase , _lowercase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
_lowerCAmelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowercase , device=self.device )
_lowerCAmelCase = self.scheduler.timesteps.to(self.device )
_lowerCAmelCase = latents
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the sample if we are doing classifier free guidance
_lowerCAmelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_lowerCAmelCase = self.transformer(_lowercase , encoder_hidden_states=_lowercase , timestep=_lowercase ).sample
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase = model_output.chunk(2 )
_lowerCAmelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_lowercase , dim=1 , keepdim=_lowercase )
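                # `model_output` holds log-probabilities over the codebook classes, so
                # after the guidance combination above, subtracting the logsumexp along
                # the class dimension renormalizes it back to a valid log-distribution.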
_lowerCAmelCase = self.truncate(_lowercase , _lowercase )
# remove `log(0)`'s (`-inf`s)
_lowerCAmelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(_lowercase , timestep=_lowercase , sample=_lowercase , generator=_lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowercase , _lowercase , _lowercase )
_lowerCAmelCase = self.vqvae.config.vq_embed_dim
_lowerCAmelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_lowerCAmelCase = self.vqvae.quantize.get_codebook_entry(_lowercase , shape=_lowercase )
_lowerCAmelCase = self.vqvae.decode(_lowercase , force_not_quantize=_lowercase ).sample
_lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
def _lowercase ( self , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = torch.sort(_lowercase , 1 , descending=_lowercase )
_lowerCAmelCase = torch.exp(_lowercase )
_lowerCAmelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
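        # The sort is descending, so this mask keeps the most-likely codebook entries
        # whose cumulative probability is still below `truncation_rate`; everything
        # outside the kept set is later forced to log(0) (-inf), which is
        # VQ-Diffusion's truncation sampling.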
# Ensure that at least the largest probability is not zeroed out
_lowerCAmelCase = torch.full_like(keep_mask[:, 0:1, :] , _lowercase )
_lowerCAmelCase = torch.cat((all_true, keep_mask) , dim=1 )
_lowerCAmelCase = keep_mask[:, :-1, :]
_lowerCAmelCase = keep_mask.gather(1 , indices.argsort(1 ) )
_lowerCAmelCase = log_p_x_0.clone()
_lowerCAmelCase = -torch.inf # -inf = log(0)
return rv
| 229 | 0 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _snake_case ( _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = XLMProphetNetTokenizer
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
a :int = XLMProphetNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = '''[PAD]'''
a :Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_lowerCamelCase ) , 1012 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = XLMProphetNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
a :List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a :Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
a :str = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
a :List[Any] = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = '''Hello World!'''
a :str = [3_5389, 6672, 49, 2]
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
# fmt: off
a :List[str] = {'''input_ids''': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 94 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_a = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = """albert"""
def __init__( self , lowercase_=3_0000 , lowercase_=128 , lowercase_=4096 , lowercase_=12 , lowercase_=1 , lowercase_=64 , lowercase_=1_6384 , lowercase_=1 , lowercase_="gelu_new" , lowercase_=0 , lowercase_=0 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1E-1_2 , lowercase_=0.1 , lowercase_="absolute" , lowercase_=0 , lowercase_=2 , lowercase_=3 , **lowercase_ , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Optional[int] = embedding_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_hidden_groups
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Any = inner_group_num
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : Any = type_vocab_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout_prob
UpperCAmelCase_ : Tuple = position_embedding_type
class A_ (lowercase__ ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase_ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 61 | 0 |
from __future__ import annotations
def minimum_cost_path(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
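# Worked example (an illustrative addition): in [[1, 2, 5], [4, 2, 1]] the cheapest
# right/down path is 1 -> 2 -> 2 -> 1, so minimum_cost_path returns 6.
if __name__ == "__main__":
    assert minimum_cost_path([[1, 2, 5], [4, 2, 1]]) == 6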
| 176 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """efficientformer"""
def __init__( self , A = [3, 2, 6, 4] , A = [4_8, 9_6, 2_2_4, 4_4_8] , A = [True, True, True, True] , A = 4_4_8 , A = 3_2 , A = 4 , A = 7 , A = 5 , A = 8 , A = 4 , A = 0.0 , A = 1_6 , A = 3 , A = 3 , A = 3 , A = 2 , A = 1 , A = 0.0 , A = 1 , A = True , A = True , A = 1e-5 , A = "gelu" , A = 0.02 , A = 1e-1_2 , A = 2_2_4 , A = 1e-0_5 , **A , ) -> None:
super().__init__(**A )
snake_case : Dict = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Any = hidden_sizes
snake_case : Optional[Any] = num_hidden_layers
snake_case : List[Any] = num_attention_heads
snake_case : List[Any] = initializer_range
snake_case : str = layer_norm_eps
snake_case : Dict = patch_size
snake_case : Optional[int] = num_channels
snake_case : int = depths
snake_case : Optional[int] = mlp_expansion_ratio
snake_case : Any = downsamples
snake_case : Dict = dim
snake_case : Optional[int] = key_dim
snake_case : Union[str, Any] = attention_ratio
snake_case : Any = resolution
snake_case : Dict = pool_size
snake_case : Any = downsample_patch_size
snake_case : Tuple = downsample_stride
snake_case : Any = downsample_pad
snake_case : Union[str, Any] = drop_path_rate
snake_case : List[str] = num_metaad_blocks
snake_case : Union[str, Any] = distillation
snake_case : List[str] = use_layer_scale
snake_case : int = layer_scale_init_value
snake_case : Union[str, Any] = image_size
snake_case : Dict = batch_norm_eps
| 176 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
A : List[Any] = random.Random()
def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : Tuple=1.0 , __magic_name__ : Optional[int]=None , __magic_name__ : str=None ) -> Dict:
"""simple docstring"""
if rng is None:
lowercase__ = global_rng
lowercase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : int=7 , _UpperCAmelCase : Tuple=400 , _UpperCAmelCase : Tuple=2000 , _UpperCAmelCase : Dict=10 , _UpperCAmelCase : str=160 , _UpperCAmelCase : List[str]=8 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : int=4000 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]=True , ) -> List[str]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = min_seq_length
lowercase__ = max_seq_length
lowercase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase__ = padding_value
lowercase__ = sampling_rate
lowercase__ = return_attention_mask
lowercase__ = do_normalize
lowercase__ = feature_size
lowercase__ = chunk_length
lowercase__ = hop_length
def lowerCamelCase__ (self : str ) -> Dict:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict=False ) -> Optional[Any]:
"""simple docstring"""
def _flatten(_UpperCAmelCase : Tuple ):
return list(itertools.chain(*UpperCAmelCase_ ) )
if equal_length:
lowercase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase__ = [np.asarray(UpperCAmelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A ( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A__ = WhisperFeatureExtractor if is_speech_available() else None
def lowerCamelCase__ (self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ = WhisperFeatureExtractionTester(self )
def lowerCamelCase__ (self : Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = feat_extract_first.save_pretrained(UpperCAmelCase_ )[0]
check_json_file_has_correct_format(UpperCAmelCase_ )
lowercase__ = self.feature_extraction_class.from_pretrained(UpperCAmelCase_ )
lowercase__ = feat_extract_first.to_dict()
lowercase__ = feat_extract_second.to_dict()
lowercase__ = feat_extract_first.mel_filters
lowercase__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCAmelCase_ , """feat_extract.json""" )
feat_extract_first.to_json_file(UpperCAmelCase_ )
lowercase__ = self.feature_extraction_class.from_json_file(UpperCAmelCase_ )
lowercase__ = feat_extract_first.to_dict()
lowercase__ = feat_extract_second.to_dict()
lowercase__ = feat_extract_first.mel_filters
lowercase__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs]
# Test feature size
lowercase__ = feature_extractor(UpperCAmelCase_ , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowercase__ = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
lowercase__ = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
# Test batched
lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features
lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowercase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase__ = np.asarray(UpperCAmelCase_ )
lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features
lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
# Test truncation required
lowercase__ = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
lowercase__ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs]
lowercase__ = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowercase__ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs_truncated]
lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features
lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
def lowerCamelCase__ (self : List[Any] ) -> Any:
"""simple docstring"""
import torch
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ = np.random.rand(100 , 32 ).astype(np.floataa )
lowercase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowercase__ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowercase__ = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
lowercase__ = ds.sort("""id""" ).select(range(UpperCAmelCase_ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def lowerCamelCase__ (self : Any ) -> Any:
"""simple docstring"""
lowercase__ = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
lowercase__ = self._load_datasamples(1 )
lowercase__ = WhisperFeatureExtractor()
lowercase__ = feature_extractor(UpperCAmelCase_ , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , UpperCAmelCase_ , atol=1E-4 ) )
def lowerCamelCase__ (self : int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ = self._load_datasamples(1 )[0]
lowercase__ = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
lowercase__ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=UpperCAmelCase_ )[0]
self.assertTrue(np.all(np.mean(UpperCAmelCase_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase_ ) - 1 ) < 1E-3 ) )
| 305 |
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"""Vertex\tShortest Distance from vertex {src}""")
    for i, d in enumerate(distance):
        print(f"""{i}\t\t{d}""")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
        if distance[u] != float("""inf""") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("""inf""")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
            if distance[u] != float("""inf""") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("""Negative cycle found""")
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
V = int(input('Enter number of vertices: ').strip())
E = int(input('Enter number of edges: ').strip())
graph: list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
    print('Edge ', i + 1)
    src, dest, weight = (
        int(x)
        for x in input('Enter source, destination, weight: ').strip().split(' ')
    )
    graph[i] = {'src': src, 'dst': dest, 'weight': weight}
source = int(input('\nEnter shortest path source:').strip())
shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
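# Hedged, non-interactive example of the same API (distances worked out by hand):
#     edges = [
#         {"src": 0, "dst": 1, "weight": 2},
#         {"src": 1, "dst": 2, "weight": 3},
#         {"src": 0, "dst": 2, "weight": 10},
#     ]
#     bellman_ford(edges, vertex_count=3, edge_count=3, src=0) -> [0.0, 2.0, 5.0]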
| 347 | 0 |
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    '''simple docstring'''

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
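# Note on the heuristic (an illustrative addition): |dx| + |dy| is the Manhattan
# distance, which suits this 4-connected grid with unit step cost. Greedy best-first
# sorts the open list by this value alone, ignoring g_cost, so the returned path is
# not guaranteed to be the shortest one.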
class GreedyBestFirst:
    '''simple docstring'''

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
greedy_bf = GreedyBestFirst(init, goal)
path = greedy_bf.search()
if path:
for pos_x, pos_y in path:
grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
| 273 |
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch", "torchsde"]
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple) ->Any:
'''simple docstring'''
requires_backends(self , ["torch", "torchsde"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int]) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Tuple , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"])
| 273 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1E-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
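
# --- illustrative sketch (not part of the original snippet) ---
# The attribute_map above lets callers read `hidden_size` even though the
# config stores `emb_dim`; a hypothetical minimal version of that indirection:
class _MappedConfig:
    attribute_map = {"hidden_size": "emb_dim"}

    def __init__(self, emb_dim: int) -> None:
        self.emb_dim = emb_dim

    def __getattr__(self, name: str):
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

assert _MappedConfig(emb_dim=2048).hidden_size == 2048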
| 109 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(F'Started running {mod.modelId}!!!')
        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
        )
        print(F'{mod.modelId} has passed successfully!!!')
| 282 | 0 |
'''simple docstring'''
import numpy as np
def power_iteration(input_matrix: np.ndarray, vector: np.ndarray, error_tol: float = 1e-12, max_iterations: int = 100):
    """Find the largest eigenvalue and its eigenvector by power iteration."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    """Compare power_iteration against numpy's eigh on real and complex inputs."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
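
    # --- illustrative sketch (not part of the original snippet) ---
    # Worked example on a tiny matrix with a known dominant eigenvalue:
    # diag(2, 1) has largest eigenvalue 2, reached from almost any start vector.
    _val, _vec = power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))
    assert abs(_val - 2.0) <= 1e-6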
| 92 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build a BARThez sequence: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
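
# --- illustrative sketch (not part of the original snippet) ---
# The pair encoding built above follows the <s> A </s></s> B </s> layout;
# a quick check with hypothetical ids (cls=0, sep=2):
_cls, _sep = [0], [2]
_a, _b = [10, 11], [20]
assert _cls + _a + _sep + _sep + _b + _sep == [0, 10, 11, 2, 2, 20, 2]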
| 92 | 1 |
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    """Iteratively add the decimal digits of ``n``."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive variant of ``sum_of_digits``."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """One-liner over the string representation."""
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    """Time each implementation on a few progressively larger inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup="import __main__")
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
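
    # --- illustrative sketch (not part of the original snippet) ---
    # Worked example: 262144 = 2**18 and 2 + 6 + 2 + 1 + 4 + 4 = 19.
    assert sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19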
| 197 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 160 | 0 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first character of ``sentence`` if it is a lowercase letter."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
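
    # --- illustrative sketch (not part of the original snippet) ---
    # Only the first character is looked up in the a->A mapping, so digits
    # and already-uppercase characters pass through unchanged.
    assert capitalize("hello world") == "Hello world"
    assert capitalize("123 abc") == "123 abc"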
| 139 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """Builds tiny MobileBERT configs and inputs for the unit tests below."""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
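
# --- illustrative sketch (not part of the original snippet) ---
# The bound above is a *relative* error check, which stays meaningful
# across the 10e0..10e8 magnitudes MobileBERT produces:
_expected = torch.tensor([1.0e8, 2.0])
_actual = _expected * (1.0 + 5.0e-4)  # hypothetical 0.05% relative error
_ratio = _expected / _actual
assert bool(torch.all(_ratio >= 1 - 1e-3) and torch.all(_ratio <= 1 + 1e-3))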
| 139 | 1 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("""Parameter number must be int""")
    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1_00_00_00) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("""Parameters chain_length and number_limit must be int""")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
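
    # --- illustrative sketch (not part of the original snippet) ---
    # Worked example of the chain step: 145 is a fixed point, since
    # 1! + 4! + 5! = 1 + 24 + 120 = 145; 69 maps to 6! + 9! = 363600.
    assert digit_factorial_sum(145) == 145
    assert digit_factorial_sum(69) == 363600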
| 165 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab, honouring the fairseq offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
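
# --- illustrative sketch (not part of the original snippet) ---
# The fairseq offset logic above shifts every SentencePiece id by 1 so that
# ids 0-3 can hold <s>/<pad>/</s>/<unk>; a hypothetical round-trip:
_fairseq_offset = 1
_spm_id = 41  # hypothetical SentencePiece piece id
_token_id = _spm_id + _fairseq_offset
assert _token_id - _fairseq_offset == _spm_id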
| 165 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swin-tiny-patch4-window7-224': (
        'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
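
# --- illustrative sketch (not part of the original snippet) ---
# Worked example of the channel-doubling rule above: with embed_dim=96 and
# four stages (depths=[2, 2, 6, 2]), the final hidden size is 96 * 2**3 = 768.
assert int(96 * 2 ** (4 - 1)) == 768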
| 368 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError('Quantized models are not supported.')
    matches = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$', model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = 'imagenet-1k-id2label.json'
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = 'background'
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={'width': config.image_size, 'height': config.image_size}, size={'shortest_edge': config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing to the hub...')
        repo_id = 'google/' + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
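
    # --- illustrative sketch (not part of the original snippet) ---
    # Worked example of the model-name regex above: depth multiplier and input
    # resolution are parsed straight out of the checkpoint name.
    _m = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$', 'mobilenet_v1_1.0_224')
    assert _m is not None and float(_m[1]) == 1.0 and int(_m[2]) == 224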
| 269 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
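
# --- illustrative sketch (not part of the original snippet) ---
# Hypothetical minimal version of the lazy-module trick above: replace the
# module object in sys.modules with a proxy that imports submodules only on
# first attribute access.
import importlib
import types

class _Lazy(types.ModuleType):
    def __init__(self, name, structure):
        super().__init__(name)
        self._structure = structure

    def __getattr__(self, item):
        for submodule, names in self._structure.items():
            if item in names:
                return getattr(importlib.import_module(f"{self.__name__}.{submodule}"), item)
        raise AttributeError(item)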
| 261 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Sets the discrete timesteps used for the diffusion chain (to be run before inference)."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """Propagate the sample one step backwards through the diffusion process (fourth-order linear multistep)."""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """The iPNDM scheduler does not scale the denoising model input; the sample is returned unchanged."""
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample
def __len__( self : List[str] ):
"""simple docstring"""
return self.config.num_train_timesteps
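# Minimal usage sketch for this scheduler. `unet` and the initial `sample`
# tensor are assumed to exist elsewhere; they are placeholders, not part of
# this file:
#
#   scheduler = IPNDMScheduler()
#   scheduler.set_timesteps(num_inference_steps=50)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample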
| 167 | 0 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A list ordered by its top (last) element, so whole stacks can be bisected."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
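# Worked example: for [5, 1, 4, 2] the loop builds the stacks [5, 1] and
# [4, 2] (each stack is decreasing, so only its last element is compared by
# bisect_left); merging their reversed forms [1, 5] and [2, 4] yields
# [1, 2, 4, 5].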
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted)) | 371 |
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
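# Examples, derived directly from the pattern above (optional "+91" prefix
# with an optional hyphen/space, optional leading "0" or "91", then ten
# digits starting with 7, 8 or 9):
#   indian_phone_validator("+918827897895")  # True
#   indian_phone_validator("9876543210")     # True
#   indian_phone_validator("1234567890")     # False (must start with 7-9)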
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895""")) | 12 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 210 | import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pickle_tokenizer(self):
        pass  # TODO add if relevant

    def test_rust_and_python_full_tokenizers(self):
        pass  # TODO add if relevant

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )
    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
@require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )
@require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
    def test_pickle_tokenizer(self):
        pass  # TODO add if relevant

    def test_rust_and_python_full_tokenizers(self):
        pass  # TODO add if relevant

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from.")) | 210 | 1 |
"""simple docstring"""
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
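# Example: multiplication_table(number=5, number_of_terms=3) returns the
# string "5 * 1 = 5\n5 * 2 = 10\n5 * 3 = 15".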
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 365 |
def is_unique(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
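# Worked example of the bitmap trick: for "abca" the bits at positions 97,
# 98 and 99 (the code points of 'a', 'b', 'c') are set one by one; the second
# 'a' finds bit 97 already set, so the function returns False, while
# is_unique("abc") returns True.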
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist"
                    ", no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json"
                    " dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}`"
                    f" does not exist, no preloaded voice preset will be used - Make sure to provide correct paths"
                    f" to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
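# Minimal usage sketch (assumes network access to the public "suno/bark-small"
# checkpoint; the voice preset name is one of the presets shipped with Bark):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")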
| 22 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
    for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load original TensorFlow model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
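    # Hypothetical invocation sketch (the script file name is assumed; the
    # pretrained weights are fetched through Keras by the script itself):
    #   python convert_efficientnet_to_pytorch.py --model_name b0 \
    #       --pytorch_dump_folder_path hf_model --save_model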
| 319 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 359 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights into our DETR structure."""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 303 | 0 |
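A minimal sketch of the prefix-remapping idea used in the conversion above, with toy key names rather than the real DETR checkpoint layout:

# Sketch of state-dict prefix remapping; the keys are illustrative toys,
# not the real DETR checkpoint layout.
import torch

state_dict = {
    "encoder.layer.0.weight": torch.zeros(2, 2),
    "class_labels_classifier.weight": torch.zeros(2, 2),
}
prefix = "model."
for key in list(state_dict):
    # head weights keep their name; everything else moves under the prefix
    if not key.startswith("class_labels_classifier"):
        state_dict[prefix + key] = state_dict.pop(key)

assert "model.encoder.layer.0.weight" in state_dict
assert "class_labels_classifier.weight" in state_dict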
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Pressure from the ideal gas law PV = nRT, i.e. P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Volume from the ideal gas law PV = nRT, i.e. V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 136 |
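Quick usage check for the two helpers above: one mole at 300 K in 0.0224 cubic meters gives roughly atmospheric-scale pressure, and the two formulas invert each other.

# Usage sketch for the ideal gas helpers (PV = nRT).
pressure = pressure_of_gas_system(1.0, 300.0, 0.0224)  # ~111,354 Pa
volume = volume_of_gas_system(1.0, 300.0, pressure)
assert abs(volume - 0.0224) < 1e-9  # the two formulas are inverses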
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Check that gradients are (or intentionally are not) in sync across the two models."""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    """Run one forward/backward step, optionally through `accelerator.backward`."""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on a distributed setup that the context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("""**Test `accumulate` gradient accumulation with dataloader break**""")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("""**Test NOOP `no_sync` context manager**""")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("""**Test Distributed `no_sync` context manager**""")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation, """,
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("""<""", """2.0""") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                """**Test `accumulate` gradient accumulation with optimizer and scheduler, """,
                """`split_batches=False`, `dispatch_batches=False`**""",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation with optimizer and scheduler, """,
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 136 | 1 |
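For reference, a minimal sketch of the training pattern these tests exercise: wrapping each step in `accelerator.accumulate(model)` so gradient synchronization only happens when an accumulation window completes. It reuses the RegressionModel/RegressionDataset test helpers imported above; the hyperparameters are illustrative.

# Minimal gradient-accumulation loop (relies on the imports in the file above:
# Accelerator, DataLoader, AdamW, F, RegressionModel, RegressionDataset).
accelerator = Accelerator(gradient_accumulation_steps=2)
model = RegressionModel()
dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
optimizer = AdamW(model.parameters(), lr=1e-3)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):  # grads sync only on window boundaries
        output = model(batch["x"])
        loss = F.mse_loss(output, batch["y"])
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()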
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-R tokenizer into a single processor."""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 89 |
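Usage sketch for the processor above. "BAAI/AltCLIP" is one public checkpoint that pairs a CLIP image processor with an XLM-R tokenizer; it is used here only as an example and requires network access.

# Usage sketch; checkpoint name is illustrative.
from PIL import Image

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
inputs = processor(
    text=["a photo of a cat"],
    images=Image.new("RGB", (224, 224)),
    return_tensors="pt",
)
print(inputs.keys())  # input_ids, attention_mask, pixel_values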
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("""DownBlock2D""", """AttnDownBlock2D"""),
            up_block_types=("""AttnUpBlock2D""", """UpBlock2D"""),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="""numpy""").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="""numpy""", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = """google/ddpm-cifar10-32"""

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="""numpy""").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 89 | 1 |
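The tests above rely on seeded generators making pipeline runs bitwise reproducible; the same property in miniature:

# Sketch of the determinism the tests depend on: identically seeded
# generators produce identical samples.
import torch

g1 = torch.Generator().manual_seed(0)
g2 = torch.Generator().manual_seed(0)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))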
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'''files''': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'''files''': files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, '''rb''') as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table) | 308 |
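A minimal sketch of the `_generate_tables` step in isolation: a pickled DataFrame read back and converted to an Arrow table. The file name is illustrative.

import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
df.to_pickle("example.pkl")
with open("example.pkl", "rb") as f:
    table = pa.Table.from_pandas(pd.read_pickle(f))
print(table.schema)  # a: int64, b: string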
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: greatest common divisor of a and b."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm: gcd(a, b) = gcd(b, a mod b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}""")
    print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}""")
    print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}""")
    print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}""")
    print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}""")
    print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}""")
    print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}""")
    print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}""")
    print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}""")
    print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}""")


if __name__ == "__main__":
    main()
| 108 | 0 |
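The gcd above also gives the least common multiple directly, since lcm(a, b) = a*b // gcd(a, b):

def euclidean_lcm(a: int, b: int) -> int:
    # lcm via the gcd computed above
    return a * b // euclidean_gcd(a, b)

assert euclidean_gcd(12, 18) == 6
assert euclidean_lcm(4, 6) == 12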
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class lowercase :
"""simple docstring"""
    def __init__(self):
        # adjacency list: vertex -> list of [weight, neighbor] edges
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # add the directed edge u -> v with weight w
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []
def _snake_case ( self ) -> Optional[Any]:
return list(self.graph )
def _snake_case ( self ,a_ ,a_ ) -> int:
if self.graph.get(a_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(a_ )
def _snake_case ( self ,a_=-2 ,a_=-1 ) -> Any:
if s == d:
return []
_UpperCAmelCase : Dict = []
_UpperCAmelCase : List[Any] = []
if s == -2:
_UpperCAmelCase : Dict = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : List[str] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(a_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(a_ ) != 0:
_UpperCAmelCase : Optional[Any] = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : int = ss
# check if se have reached the starting point
if len(a_ ) == 0:
return visited
def _snake_case ( self ,a_=-1 ) -> Union[str, Any]:
if c == -1:
_UpperCAmelCase : str = floor(random() * 10_000 ) + 10
for i in range(a_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_UpperCAmelCase : Optional[int] = floor(random() * c ) + 1
if n != i:
self.add_pair(a_ ,a_ ,1 )
def _snake_case ( self ,a_=-2 ) -> str:
_UpperCAmelCase : Any = deque()
_UpperCAmelCase : int = []
if s == -2:
_UpperCAmelCase : Dict = list(self.graph )[0]
d.append(a_ )
visited.append(a_ )
while d:
_UpperCAmelCase : Optional[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self ,a_ ) -> Optional[int]:
_UpperCAmelCase : List[str] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _snake_case ( self ,a_ ) -> Optional[Any]:
return len(self.graph[u] )
def _snake_case ( self ,a_=-2 ) -> int:
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = []
if s == -2:
_UpperCAmelCase : Optional[int] = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : int = s
_UpperCAmelCase : int = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(a_ ) != 0:
_UpperCAmelCase : Any = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : Union[str, Any] = ss
# check if se have reached the starting point
if len(a_ ) == 0:
return sorted_nodes
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : int = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : Union[str, Any] = -2
_UpperCAmelCase : str = []
_UpperCAmelCase : Union[str, Any] = s
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Tuple = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : List[Any] = len(a_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Tuple = True
if len(a_ ) != 0:
_UpperCAmelCase : str = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : Any = False
indirect_parents.append(a_ )
_UpperCAmelCase : Optional[int] = s
_UpperCAmelCase : List[Any] = ss
# check if se have reached the starting point
if len(a_ ) == 0:
return list(a_ )
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Optional[Any] = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : Dict = -2
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Dict = s
_UpperCAmelCase : str = False
_UpperCAmelCase : Union[str, Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : int = len(a_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Dict = True
if len(a_ ) != 0:
_UpperCAmelCase : Optional[int] = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : Union[str, Any] = False
indirect_parents.append(a_ )
_UpperCAmelCase : Any = s
_UpperCAmelCase : Any = ss
# check if se have reached the starting point
if len(a_ ) == 0:
return False
def _snake_case ( self ,a_=-2 ,a_=-1 ) -> Optional[int]:
_UpperCAmelCase : Union[str, Any] = time()
self.dfs(a_ ,a_ )
_UpperCAmelCase : Dict = time()
return end - begin
def _snake_case ( self ,a_=-2 ) -> int:
_UpperCAmelCase : int = time()
self.bfs(a_ )
_UpperCAmelCase : Union[str, Any] = time()
return end - begin
class lowercase :
"""simple docstring"""
    def __init__(self):
        # adjacency list: vertex -> list of [weight, neighbor] edges
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]
def _snake_case ( self ,a_ ,a_ ) -> Optional[Any]:
if self.graph.get(a_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(a_ )
# the other way round
if self.graph.get(a_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(a_ )
def _snake_case ( self ,a_=-2 ,a_=-1 ) -> Tuple:
if s == d:
return []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Tuple = []
if s == -2:
_UpperCAmelCase : Dict = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : List[str] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(a_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(a_ ) != 0:
_UpperCAmelCase : Tuple = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : int = ss
# check if se have reached the starting point
if len(a_ ) == 0:
return visited
def _snake_case ( self ,a_=-1 ) -> List[Any]:
if c == -1:
_UpperCAmelCase : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(a_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_UpperCAmelCase : List[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(a_ ,a_ ,1 )
def _snake_case ( self ,a_=-2 ) -> int:
_UpperCAmelCase : Optional[int] = deque()
_UpperCAmelCase : Any = []
if s == -2:
_UpperCAmelCase : Tuple = list(self.graph )[0]
d.append(a_ )
visited.append(a_ )
while d:
_UpperCAmelCase : str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self ,a_ ) -> Tuple:
return len(self.graph[u] )
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Union[str, Any] = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : Tuple = -2
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Optional[Any] = s
_UpperCAmelCase : int = False
_UpperCAmelCase : Any = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : Union[str, Any] = len(a_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Any = True
if len(a_ ) != 0:
_UpperCAmelCase : Any = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : Tuple = False
indirect_parents.append(a_ )
_UpperCAmelCase : Dict = s
_UpperCAmelCase : str = ss
# check if se have reached the starting point
if len(a_ ) == 0:
return list(a_ )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Tuple = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : Dict = -2
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : str = s
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : List[str] = len(a_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Optional[int] = True
if len(a_ ) != 0:
_UpperCAmelCase : Tuple = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : Dict = False
indirect_parents.append(a_ )
_UpperCAmelCase : List[str] = s
_UpperCAmelCase : Union[str, Any] = ss
# check if se have reached the starting point
if len(a_ ) == 0:
return False
def _snake_case ( self ) -> List[Any]:
return list(self.graph )
def _snake_case ( self ,a_=-2 ,a_=-1 ) -> Optional[int]:
_UpperCAmelCase : Any = time()
self.dfs(a_ ,a_ )
_UpperCAmelCase : Optional[int] = time()
return end - begin
def _snake_case ( self ,a_=-2 ) -> Dict:
_UpperCAmelCase : Dict = time()
self.bfs(a_ )
_UpperCAmelCase : Union[str, Any] = time()
return end - begin
| 349 |
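For reference, the adjacency-list idea above distilled into a small, self-contained BFS; the function and sample graph are illustrative, not part of the original file.

from collections import deque

def bfs(graph: dict, start) -> list:
    visited, queue = [start], deque([start])
    while queue:
        node = queue.popleft()
        for _, neighbor in graph.get(node, []):  # edges stored as [weight, vertex]
            if neighbor not in visited:
                visited.append(neighbor)
                queue.append(neighbor)
    return visited

g = {0: [[1, 1], [1, 2]], 1: [[1, 3]], 2: [], 3: []}
assert bfs(g, 0) == [0, 1, 2, 3]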
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roberta_prelayernorm"""] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roberta_prelayernorm"""] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roberta_prelayernorm"""] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 349 | 1 |
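The `_LazyModule` pattern above defers heavy submodule imports until an attribute is first accessed. A simplified stand-in using PEP 562 module-level `__getattr__` (this is not transformers' actual implementation):

# Sketch of lazy imports via module __getattr__; names are illustrative.
import importlib

_import_structure = {"math": ["sqrt"]}

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")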
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
"""simple docstring"""
lowerCAmelCase : int = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
lowerCAmelCase : Optional[int] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
lowerCAmelCase : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn\'t access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
lowerCAmelCase : str = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__snake_case )
BertModel.from_pretrained(__snake_case )
BertTokenizer.from_pretrained(__snake_case )
pipeline(task="fill-mask" , model=__snake_case )
# baseline - just load from_pretrained with normal network
lowerCAmelCase : List[Any] = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
lowerCAmelCase : Any = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCAmelCase : Optional[Any] = "1"
lowerCAmelCase : Dict = subprocess.run(__snake_case , env=__snake_case , check=__snake_case , capture_output=__snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
    def test_offline_mode_no_internet(self):
"""simple docstring"""
lowerCAmelCase : Any = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
lowerCAmelCase : List[str] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
lowerCAmelCase : str = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
lowerCAmelCase : Optional[Any] = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__snake_case )
BertModel.from_pretrained(__snake_case )
BertTokenizer.from_pretrained(__snake_case )
pipeline(task="fill-mask" , model=__snake_case )
# baseline - just load from_pretrained with normal network
lowerCAmelCase : str = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
lowerCAmelCase : List[Any] = self.get_env()
lowerCAmelCase : str = subprocess.run(__snake_case , env=__snake_case , check=__snake_case , capture_output=__snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
"""simple docstring"""
lowerCAmelCase : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
lowerCAmelCase : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
lowerCAmelCase : Optional[int] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
lowerCAmelCase : str = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
lowerCAmelCase : List[Any] = self.get_env()
lowerCAmelCase : Optional[int] = subprocess.run(__snake_case , env=__snake_case , check=__snake_case , capture_output=__snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
lowerCAmelCase : str = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCAmelCase : int = "1"
lowerCAmelCase : int = subprocess.run(__snake_case , env=__snake_case , check=__snake_case , capture_output=__snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception(self):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = "\nfrom transformers import pipeline\n "
lowerCAmelCase : Any = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
lowerCAmelCase : List[str] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
lowerCAmelCase : List[Any] = self.get_env()
lowerCAmelCase : Dict = "1"
lowerCAmelCase : Dict = [sys.executable, "-c", "\n".join([load, mock, run] )]
lowerCAmelCase : Optional[Any] = subprocess.run(__snake_case , env=__snake_case , check=__snake_case , capture_output=__snake_case )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
    def test_offline_model_dynamic_model(self):
"""simple docstring"""
lowerCAmelCase : int = "\nfrom transformers import AutoModel\n "
lowerCAmelCase : List[Any] = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
lowerCAmelCase : str = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
lowerCAmelCase : List[Any] = self.get_env()
lowerCAmelCase : str = subprocess.run(__snake_case , env=__snake_case , check=__snake_case , capture_output=__snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCAmelCase : Dict = "1"
lowerCAmelCase : Any = subprocess.run(__snake_case , env=__snake_case , check=__snake_case , capture_output=__snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 108 |
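The core trick in these tests is simulating a network outage in a child process by replacing `socket.socket` before any download is attempted; in miniature:

import socket

def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")

socket.socket = offline_socket  # any attempted connection now raises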
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCAmelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    """Convert image(s) to a normalized [-1, 1] NCHW tensor."""
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead',
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    """Convert mask(s) to a binarized NCHW tensor (0 = keep, 1 = inpaint)."""
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L').resize((w, h), resample=PIL_INTERPOLATION['nearest']))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler) -> Union[str, Any]:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps=250,
        eta=0.0,
        jump_length=10,
        jump_n_sample=10,
        generator=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 218 | 0 |
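A quick sketch of the mask binarization performed in `_preprocess_mask` above: grayscale values are thresholded at 0.5 so known pixels become exactly 0 or 1.

import numpy as np

mask = np.array([[0.1, 0.7], [0.5, 0.9]], dtype=np.float32)
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
assert mask.tolist() == [[0.0, 1.0], [1.0, 1.0]]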
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of the squares of the first n naturals."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F'{solution() = }')
| 356 |
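A brute-force cross-check of the closed form above; for n = 10 the answer is 55^2 - 385 = 2640.

def solution_brute_force(n: int = 100) -> int:
    # direct computation, no closed-form shortcuts
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))

assert solution_brute_force(10) == solution(10) == 2640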
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("""model_type""")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 296 | 0 |
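A quick sanity check of the `attribute_map` indirection in the corrected class above: `hidden_size` and `num_attention_heads` resolve to `d_model` and `encoder_attention_heads`, so both spellings agree on the defaults.

config = TableTransformerConfig()
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8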
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 5_12,
'albert-large-v1': 5_12,
'albert-xlarge-v1': 5_12,
'albert-xxlarge-v1': 5_12,
'albert-base-v2': 5_12,
'albert-large-v2': 5_12,
'albert-xlarge-v2': 5_12,
'albert-xxlarge-v2': 5_12,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    '''Fast ALBERT tokenizer backed by HuggingFace tokenizers.'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ) -> Tuple:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 11 |
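# A minimal, self-contained sketch of what build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences compute for ALBERT-style inputs.
# The special-token IDs below are made up for illustration only.
cls_id, sep_id = 2, 3  # hypothetical [CLS] / [SEP] IDs
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]
single = [cls_id] + token_ids_0 + [sep_id]
pair = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
token_type_ids = len([cls_id] + token_ids_0 + [sep_id]) * [0] + len(token_ids_1 + [sep_id]) * [1]
assert len(pair) == len(token_type_ids)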
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None ):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 11 | 1 |
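# For reference, the registration scheme above in miniature: a generic,
# self-contained registry with alias resolution. The names here are
# illustrative, not part of the datasets API.
from typing import Callable, Dict, List, Optional
_REGISTRY: Dict[str, Callable] = {}
_REGISTRY_ALIASES: Dict[str, str] = {}
def register(fn: Callable, name: str, aliases: Optional[List[str]] = None) -> None:
    _REGISTRY[name] = fn
    for alias in set((aliases or []) + [name]):
        _REGISTRY_ALIASES[alias] = name
def resolve(name: str) -> Callable:
    canonical = _REGISTRY_ALIASES.get(name, name)
    if canonical not in _REGISTRY:
        raise ValueError(f"unknown format type {name!r}")
    return _REGISTRY[canonical]
register(list, "list", aliases=["py_list"])
assert resolve("py_list") is list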
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__(self) -> str:
        '''simple docstring'''
        return f"""Node({self.data})"""
class LinkedList:
    def __init__(self) -> None:
        '''simple docstring'''
        self.head = None
    def __iter__(self) -> Any:
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self) -> int:
        '''simple docstring'''
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        '''simple docstring'''
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int) -> Any:
        '''simple docstring'''
        if not 0 <= index < len(self):
            raise ValueError('''list index out of range.''')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None
    def __setitem__(self, index: int, data: Any) -> None:
        '''simple docstring'''
        if not 0 <= index < len(self):
            raise ValueError('''list index out of range.''')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any) -> None:
        '''simple docstring'''
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        '''simple docstring'''
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        '''simple docstring'''
        if not 0 <= index <= len(self):
            raise IndexError('''list index out of range''')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self) -> None:  # print every node data
        '''simple docstring'''
        print(self)
    def delete_head(self) -> Any:
        '''simple docstring'''
        return self.delete_nth(0)
    def delete_tail(self) -> Any:  # delete from tail
        '''simple docstring'''
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        '''simple docstring'''
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('''List index out of range.''')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self) -> bool:
        '''simple docstring'''
        return self.head is None
    def reverse(self) -> None:
        '''simple docstring'''
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(7734_5112),
        '''dlrow olleH''',
        7,
        5555,
        0,
        -192.55_555,
        '''Hello, world!''',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('''Hello again, world!'''))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('''Inserting 1st at head ''').strip())
    linked_list.insert_head(input('''Inserting 2nd at head ''').strip())
    print('''\nPrint list:''')
    linked_list.print_list()
    linked_list.insert_tail(input('''\nInserting 1st at tail ''').strip())
    linked_list.insert_tail(input('''Inserting 2nd at tail ''').strip())
    print('''\nPrint list:''')
    linked_list.print_list()
    print('''\nDelete head''')
    linked_list.delete_head()
    print('''Delete tail''')
    linked_list.delete_tail()
    print('''\nPrint list:''')
    linked_list.print_list()
    print('''\nReverse linked list''')
    linked_list.reverse()
    print('''\nPrint list:''')
    linked_list.print_list()
    print('''\nString representation of linked list:''')
    print(linked_list)
    print('''\nReading/changing Node data using indexing:''')
    print(f"""Element at Position 1: {linked_list[1]}""")
    linked_list[1] = input('''Enter New Value: ''').strip()
    print('''New list:''')
    print(linked_list)
    print(f"""length of linked_list is : {len(linked_list)}""")
if __name__ == "__main__":
    main()
| 33 |
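# A tiny property check on the LinkedList above (assumed in scope):
# reversing twice is the identity, and one reversal flips the rendering order.
values = [3, 1, 4, 1, 5]
ll = LinkedList()
for v in values:
    ll.insert_tail(v)
ll.reverse()
assert str(ll) == "->".join(str(v) for v in reversed(values))
ll.reverse()
assert list(ll) == values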
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, '''handle_key''', [])
        handle += [key]
        setattr(func, '''handle_key''', handle)
        return func
    return decorator
def mark_multiple(*keys: str):
    def decorator(func):
        handle = getattr(func, '''handle_key''', [])
        handle += keys
        setattr(func, '''handle_key''', handle)
        return func
    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        '''simple docstring'''
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, '''key_handler'''):
            setattr(new_cls, '''key_handler''', {})
        setattr(new_cls, '''handle_input''', KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, '''handle_key''', [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 33 | 1 |
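# A hedged sketch of wiring key handlers through the metaclass above
# (assumes KeyHandler/mark/mark_multiple are in scope; the keys are illustrative).
class Menu(metaclass=KeyHandler):
    @mark("j")
    def down(cls):
        return "moved down"
    @mark_multiple("k", "K")
    def up(cls):
        return "moved up"
# Each marked key now maps to its handler; instances dispatch one keypress
# at a time via self.handle_input(), which looks the pressed key up here.
assert set(Menu.key_handler) == {"j", "k", "K"}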
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """simple docstring"""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count = defaultdict(int)
    # For each character in the input strings, increment the count in the
    # corresponding bucket for the first string and decrement it for the second
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
| 47 |
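# The same check can be phrased with collections.Counter; a short, equivalent
# sketch using the same normalization as check_anagrams above:
from collections import Counter
def check_anagrams_counter(a: str, b: str) -> bool:
    def normalize(s: str) -> str:
        return s.lower().strip().replace(" ", "")
    return Counter(normalize(a)) == Counter(normalize(b))
assert check_anagrams_counter("Silent", "Listen") is True
assert check_anagrams_counter("abc", "abd") is False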
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """simple docstring"""
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass
    def test_model_common_attributes(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training(self):
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}"""):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}""")
                    loss.backward()
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.02_66, 0.19_12, -1.28_61]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """simple docstring"""
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 61 | 0 |
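# For orientation, a hedged end-to-end sketch of the inference path the
# integration tests above exercise (downloads the public DeiT checkpoint;
# the image path is illustrative):
import torch
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor
processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
image = Image.open("cat.png")  # any RGB image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[int(logits.argmax(-1))])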
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    'features', [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ], )
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'train': text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    'features', [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ], )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = 'train'
        path = {'train': text_path, 'test': text_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
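# A runnable sketch of the reader under test, outside pytest
# (the file and cache locations are illustrative):
from pathlib import Path
text_file = Path("data.txt")
text_file.write_text("line one\nline two\nline three\nline four\n")
dataset = TextDatasetReader(str(text_file), cache_dir="cache").read()
assert dataset.column_names == ["text"] and dataset.num_rows == 4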
| 149 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Dict = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Dict = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Any = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : str = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : str = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Any = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : int = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Any = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : int = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Dict = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Any = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : str = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Dict = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
| 149 | 1 |
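# The dummy classes above all follow one pattern; here is a simplified,
# illustrative stand-in (the real DummyObject/requires_backends helpers in
# transformers.utils are more general than this sketch):
class _DummyObject(type):
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the sentencepiece backend.")
def _requires_backends(obj, backends):
    raise ImportError(f"{type(obj).__name__} requires {backends} to be installed.")
class SomeSentencePieceTokenizer(metaclass=_DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        _requires_backends(self, ["sentencepiece"])
# SomeSentencePieceTokenizer() now fails fast with a clear install hint instead
# of a confusing AttributeError deep inside the library.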
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """simple docstring"""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """simple docstring"""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 241 |
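# A quick check of numpy_to_pil above on random data (values in [0, 1]):
import numpy as np
batch = np.random.rand(2, 8, 8, 3)  # NHWC float images
pils = numpy_to_pil(batch)
assert len(pils) == 2 and pils[0].size == (8, 8)
gray = np.random.rand(8, 8, 1)  # single channel -> PIL mode "L"
assert numpy_to_pil(gray)[0].mode == "L"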
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    """No of Comparisons for 100 elements selected from a standard normal distribution """
    """is :"""
)
print(z)
| 241 | 1 |
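# Deterministic spot check of the comparison counter above: seeding the
# random module makes the randint pivot choices reproducible.
import random
random.seed(0)
data = [5, 3, 8, 1, 9, 2]
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
assert data == sorted([5, 3, 8, 1, 9, 2])
print(f"sorted in {comparisons} comparisons")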
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class Direction(enum.Enum):
    '''simple docstring'''
    UP = 0
    DOWN = 1
def forceWrite(content, end=""):
    '''simple docstring'''
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()
def writeColor(content, color, end=""):
    '''simple docstring'''
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
def reset_cursor():
    '''simple docstring'''
    forceWrite('''\r''')
def move_cursor(num_lines, direction):
    '''simple docstring'''
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
def clear_line():
    '''simple docstring'''
    forceWrite(''' ''' * TERMINAL_WIDTH)
    reset_cursor()
def linebreak():
    '''simple docstring'''
    reset_cursor()
    forceWrite('''-''' * TERMINAL_WIDTH)
| 252 |
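# A short sketch of redrawing a status line in place with the helpers above
# (clear_line pads with spaces, then a carriage return rewinds the cursor):
import time
for pct in range(0, 101, 25):
    clear_line()
    forceWrite(f"progress: {pct}%")
    time.sleep(0.05)
forceWrite("\n")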
from itertools import count
def solution(min_block_length: int = 5_0) -> int:
    '''simple docstring'''
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_0_0_0_0_0_0:
            break
    return n
if __name__ == "__main__":
print(f"{solution() = }")
| 252 | 1 |
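# The recurrence made concrete for Project Euler 114/115: with minimum block
# length 3, a row of 7 units can be filled in exactly 17 ways. This sketch
# recomputes fill counts bottom-up with the same recurrence as solution() above.
def fill_count(min_block_length: int, n: int) -> int:
    f = [1] * max(min_block_length, n + 1)
    for length in range(min_block_length, n + 1):
        for block_length in range(min_block_length, length + 1):
            for block_start in range(length - block_length):
                f[length] += f[length - block_start - block_length - 1]
            f[length] += 1  # block flush against the right edge
    return f[n]
assert fill_count(3, 7) == 17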
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 10_02)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_02)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ], )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 3_53_78, 66_61, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
# fmt: off
__UpperCAmelCase : Tuple = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
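# A minimal reproduction sketch outside the test class (illustrative only; it
# assumes the same "xlm-roberta-base" sentencepiece checkpoint the tests use):
#
#   from transformers import XLMRobertaTokenizer
#   tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   ids = tokenizer.encode("A sentence with saoneuhaoesuth")
#   # the made-up word maps to the tokenizer's <unk> id (3, as asserted above)
#   assert tokenizer.unk_token_id == 3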
| 157 | # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_snake_case = '''pytorch_model.bin'''
_snake_case = '''pytorch_model.bin.index.json'''
_snake_case = '''adapter_config.json'''
_snake_case = '''adapter_model.bin'''
_snake_case = '''adapter_model.safetensors'''
_snake_case = '''tf_model.h5'''
_snake_case = '''tf_model.h5.index.json'''
_snake_case = '''model.ckpt'''
_snake_case = '''flax_model.msgpack'''
_snake_case = '''flax_model.msgpack.index.json'''
_snake_case = '''model.safetensors'''
_snake_case = '''model.safetensors.index.json'''
_snake_case = '''config.json'''
FEATURE_EXTRACTOR_NAME = '''preprocessor_config.json'''
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
_snake_case = '''generation_config.json'''
_snake_case = '''modelcard.json'''
SENTENCEPIECE_UNDERLINE = '''▁'''
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_snake_case = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_snake_case = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_snake_case = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _UpperCamelCase ( snake_case__ ) -> Any:
    if version.parse(__version__ ) < version.parse(snake_case__ ):
        if "dev" in snake_case__:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f'''This example requires a minimum version of {snake_case__},'''
        error_message += f''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers." )
| 157 | 1 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def _A ( snake_case ) -> Dict:
return EnvironmentCommand()
def _B ( snake_case ) -> Any:
    return EnvironmentCommand(snake_case.accelerate_config_file )
class a__ ( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( _UpperCamelCase ):
"""simple docstring"""
_lowercase : str = parser.add_parser("env" )
download_parser.set_defaults(func=_lowerCamelCase )
download_parser.add_argument(
"--accelerate-config_file" , default=_lowerCamelCase , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=_lowerCamelCase )
    def __init__( self , _UpperCamelCase , *args ):
        """simple docstring"""
        self._accelerate_config_file = _UpperCamelCase
    def run( self ):
"""simple docstring"""
        safetensors_version = '''not installed'''
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors" ) is not None:
            import safetensors

            safetensors_version = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
        accelerate_version = '''not installed'''
        accelerate_config = accelerate_config_str = '''not found'''
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                '''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else f'''\t{accelerate_config}'''
            )
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = '''not installed'''
        tf_cuda_available = '''NA'''
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU" ) )
        flax_version = jax_version = jaxlib_version = '''not installed'''
        jax_backend = '''NA'''
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': f'''{safetensors_version}''',
'''Accelerate version''': f'''{accelerate_version}''',
'''Accelerate config''': f'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': f'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': f'''{flax_version} ({jax_backend})''',
'''Jax version''': f'''{jax_version}''',
'''JaxLib version''': f'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
return info
@staticmethod
    def format_dict( d ):
"""simple docstring"""
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 369 |
'''simple docstring'''
def nand_gate( input_a: int , input_b: int ) -> int:
    return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate( ) -> None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
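# NAND is functionally complete, so the other basic gates can be derived from
# `nand_gate` alone. A minimal sketch (the helper names below are illustrative
# additions, not part of the original module):
def not_gate(input_a: int) -> int:
    return nand_gate(input_a, input_a)


def and_gate(input_a: int, input_b: int) -> int:
    return not_gate(nand_gate(input_a, input_b))


def or_gate(input_a: int, input_b: int) -> int:
    return nand_gate(not_gate(input_a), not_gate(input_b))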
| 199 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_squeezebert_fast'''] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_squeezebert'''] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 320 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
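# A minimal usage sketch (illustrative; the class above mirrors the upstream
# ConvNeXT-style image processor, and the array sizes are assumed):
#
#   import numpy as np
#   processor = A__(size={"shortest_edge": 384})
#   batch = processor(images=np.zeros((512, 512, 3), dtype=np.uint8), return_tensors="np")
#   batch["pixel_values"].shape   # (1, 3, 384, 384) after resize + normalize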
| 287 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : List[str] ,_a : Optional[Any] ,_a : Any=13 ,_a : str=[30, 30] ,_a : Dict=2 ,_a : Optional[Any]=3 ,_a : int=True ,_a : List[Any]=True ,_a : Union[str, Any]=32 ,_a : Dict=5 ,_a : List[str]=4 ,_a : int=37 ,_a : List[str]="gelu" ,_a : Dict=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[str]=10 ,_a : Union[str, Any]=0.02 ,_a : Union[str, Any]=3 ,_a : Any=None ,_a : Optional[Any]=8 ,_a : Tuple=10 ,):
'''simple docstring'''
_a : List[str] = parent
_a : int = batch_size
_a : Optional[int] = image_size
_a : Optional[int] = patch_size
_a : int = num_channels
_a : Dict = is_training
_a : Tuple = use_labels
_a : str = hidden_size
_a : Dict = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : List[str] = intermediate_size
_a : int = hidden_act
_a : Any = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : Optional[Any] = type_sequence_label_size
_a : Tuple = initializer_range
_a : Tuple = num_labels
_a : Tuple = scope
_a : List[str] = n_targets
_a : str = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
_a : List[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
_a : str = num_patches + 1 + self.num_detection_tokens
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
_a : Union[str, Any] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
_a : Dict = []
for i in range(self.batch_size ):
_a : Optional[Any] = {}
_a : Union[str, Any] = torch.randint(
high=self.num_labels ,size=(self.n_targets,) ,device=_a )
_a : str = torch.rand(self.n_targets ,4 ,device=_a )
labels.append(_a )
_a : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_a ,initializer_range=self.initializer_range ,num_detection_tokens=self.num_detection_tokens ,num_labels=self.num_labels ,)
def __lowercase ( self : List[str] ,_a : Optional[int] ,_a : Any ,_a : Dict ):
'''simple docstring'''
_a : List[str] = YolosModel(config=_a )
model.to(_a )
model.eval()
_a : Tuple = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.expected_seq_len, self.hidden_size) )
def __lowercase ( self : Optional[Any] ,_a : Union[str, Any] ,_a : str ,_a : Optional[int] ):
'''simple docstring'''
_a : Tuple = YolosForObjectDetection(_a )
model.to(_a )
model.eval()
_a : str = model(pixel_values=_a )
_a : Any = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape ,(self.batch_size, self.num_detection_tokens, 4) )
_a : Tuple = model(pixel_values=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape ,(self.batch_size, self.num_detection_tokens, 4) )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[Any] = self.prepare_config_and_inputs()
_a, _a, _a : Union[str, Any] = config_and_inputs
_a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Dict = False
def __lowercase ( self : List[Any] ,_a : Optional[int] ,_a : Tuple ,_a : List[str]=False ):
'''simple docstring'''
_a : List[Any] = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
_a : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
_a : Tuple = {}
_a : str = torch.ones(
size=(self.model_tester.n_targets,) ,device=_a ,dtype=torch.long )
_a : Any = torch.ones(
self.model_tester.n_targets ,4 ,device=_a ,dtype=torch.float )
labels.append(_a )
_a : Any = labels
return inputs_dict
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Union[str, Any] = YolosModelTester(self )
_a : str = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
def __lowercase ( self : Any ):
'''simple docstring'''
_a, _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : str ):
'''simple docstring'''
_a, _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(_a )
_a : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[int] = [*signature.parameters.keys()]
_a : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : int = self.model_tester.prepare_config_and_inputs_for_common()
_a : Tuple = True
# in YOLOS, the seq_len is different
_a : str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
_a : Union[str, Any] = True
_a : List[Any] = False
_a : List[str] = True
_a : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Optional[int] = model(**self._prepare_for_class(_a ,_a ) )
_a : List[Any] = outputs.attentions
self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a : Tuple = True
_a : Union[str, Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(_a ,_a ) )
_a : int = outputs.attentions
self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
_a : Any = len(_a )
# Check attention is always last and order is fine
_a : int = True
_a : List[Any] = True
_a : str = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(_a ,_a ) )
_a : List[str] = 1
self.assertEqual(out_len + added_hidden_states ,len(_a ) )
_a : Union[str, Any] = outputs.attentions
self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(_a : List[Any] ,_a : Optional[int] ,_a : int ):
_a : List[str] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : int = model(**self._prepare_for_class(_a ,_a ) )
_a : Any = outputs.hidden_states
_a : Union[str, Any] = getattr(
self.model_tester ,'expected_num_hidden_layers' ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_a ) ,_a )
# YOLOS has a different seq_length
_a : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict = True
check_hidden_states_output(_a ,_a ,_a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[int] = True
check_hidden_states_output(_a ,_a ,_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_a )
@slow
def __lowercase ( self : Any ):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Union[str, Any] = YolosModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : int ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[Any] = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(_a )
_a : Tuple = self.default_image_processor
_a : Optional[int] = prepare_img()
_a : Any = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : List[str] = model(inputs.pixel_values )
# verify outputs
_a : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape ,_a )
_a : str = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ,device=_a ,)
_a : Tuple = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ,device=_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,_a ,atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] ,_a ,atol=1E-4 ) )
# verify postprocessing
_a : str = image_processor.post_process_object_detection(
_a ,threshold=0.3 ,target_sizes=[image.size[::-1]] )[0]
_a : Union[str, Any] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(_a )
_a : Dict = [75, 75, 17, 63, 17]
_a : List[str] = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(_a )
self.assertEqual(len(results['scores'] ) ,5 )
self.assertTrue(torch.allclose(results['scores'] ,_a ,atol=1E-4 ) )
self.assertSequenceEqual(results['labels'].tolist() ,_a )
self.assertTrue(torch.allclose(results['boxes'][0, :] ,_a ) )
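# Outside the test suite the same checkpoint can be exercised through the
# object-detection pipeline. An illustrative sketch (the image URL is the
# standard COCO sample that the fixture above corresponds to):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="hustvl/yolos-small")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> a list of {"score", "label", "box"} dicts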
| 5 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covid19_stats (__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
    soup = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
    keys = soup.findAll('h1' )
    values = soup.findAll('div' , {'class': 'maincounter-number'} )
    keys += soup.findAll('span' , {'class': 'panel-title'} )
    values += soup.findAll('div' , {'class': 'number-table-main'} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 1 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowerCAmelCase = 16
__lowerCAmelCase = 32
def get_dataloaders( accelerator , batch_size = 16 , model_name_or_path = "bert-base-cased" ):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset('glue' , 'mrpc' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def evaluation_loop( accelerator , model , eval_dataloader , metric ):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions , references = accelerator.gather(
            (predictions, batch['labels']) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function( config , args ):
# Initialize accelerator
_a : Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : List[str] = config['lr']
_a : int = int(config['num_epochs'] )
_a : Union[str, Any] = int(config['seed'] )
_a : Optional[Any] = int(config['batch_size'] )
_a : List[str] = args.model_name_or_path
set_seed(lowerCAmelCase_ )
_a , _a : Dict = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : Tuple = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
# Instantiate optimizer
_a : List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_a : Tuple = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
_a : int = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
_a : Any = 1
_a : List[Any] = (len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_a : Any = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase_ , )
else:
_a : int = DummyScheduler(lowerCAmelCase_ , total_num_steps=lowerCAmelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a : Dict = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
_a : str = 0
# We also need to keep track of the stating epoch so files are named properly
_a : Optional[Any] = 0
_a : List[Any] = evaluate.load('glue' , 'mrpc' )
_a : List[Any] = num_epochs
if args.partial_train_epoch is not None:
_a : Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_a : Any = args.resume_from_checkpoint.split('epoch_' )[1]
_a : Optional[int] = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_a : Dict = int(lowerCAmelCase_ ) + 1
_a : Dict = evaluation_loop(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
accelerator.print('resumed checkpoint performance:' , lowerCAmelCase_ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , 'r' ) as f:
_a : str = json.load(lowerCAmelCase_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_a : Any = {}
for epoch in range(lowerCAmelCase_ , lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
_a : Optional[int] = model(**lowerCAmelCase_ )
_a : str = outputs.loss
_a : str = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_a : List[Any] = f"""epoch_{epoch}"""
_a : Union[str, Any] = os.path.join(args.output_dir , lowerCAmelCase_ )
accelerator.save_state(lowerCAmelCase_ )
_a : List[str] = evaluation_loop(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_a : Optional[Any] = accuracy
_a : List[Any] = lr_scheduler.get_lr()[0]
_a : Any = optimizer.param_groups[0]['lr']
_a : Dict = epoch
_a : List[Any] = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowerCAmelCase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , 'w' ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
def main( ):
_a : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=lowerCAmelCase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=lowerCAmelCase_ , )
parser.add_argument(
'--output_dir' , type=lowerCAmelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=lowerCAmelCase_ , default=2 , help='Number of train epochs.' , )
_a : Tuple = parser.parse_args()
_a : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
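# Illustrative launch (paths and config names are assumed): this script is
# meant to be started through the accelerate launcher, typically with a
# DeepSpeed config, and resumed from a saved epoch folder, e.g.
#
#   accelerate launch --config_file ds_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./ckpts
#   accelerate launch --config_file ds_config.yaml this_script.py \
#       --resume_from_checkpoint ./ckpts/epoch_0 --output_dir ./ckpts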
| 89 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
_snake_case = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
_snake_case = "▁"
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "token_type_ids"]
_a = FNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_A : int = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
_A : Optional[int] = do_lower_case
_A : List[Any] = remove_space
_A : str = keep_accents
_A : int = vocab_file
_A : int = False if not self.vocab_file else True
def a__ ( self , _a , _a = None ) -> List[int]:
_A : str = [self.sep_token_id]
_A : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Any = [self.sep_token_id]
_A : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : List[str] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
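# A minimal usage sketch (the class above mirrors `FNetTokenizerFast`; the
# checkpoint name is one of those listed in the pretrained maps above):
#
#   from transformers import FNetTokenizerFast
#   tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   enc = tokenizer("Hello world", "Second segment")
#   enc["input_ids"], enc["token_type_ids"]   # [CLS] A [SEP] B [SEP] layout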
| 26 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : torch.FloatTensor
class A (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__( self : int , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 3 , __lowerCAmelCase : Tuple[str] = ("DownEncoderBlock2D",) , __lowerCAmelCase : Tuple[str] = ("UpDecoderBlock2D",) , __lowerCAmelCase : Tuple[int] = (64,) , __lowerCAmelCase : int = 1 , __lowerCAmelCase : str = "silu" , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 32 , __lowerCAmelCase : int = 2_56 , __lowerCAmelCase : int = 32 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : float = 0.1_8_2_1_5 , __lowerCAmelCase : str = "group" , ) -> int:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
A__ = Encoder(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , down_block_types=__lowerCAmelCase , block_out_channels=__lowerCAmelCase , layers_per_block=__lowerCAmelCase , act_fn=__lowerCAmelCase , norm_num_groups=__lowerCAmelCase , double_z=__lowerCAmelCase , )
A__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        A__ = nn.Conv2d(__lowerCAmelCase , __lowerCAmelCase , 1 )
A__ = VectorQuantizer(__lowerCAmelCase , __lowerCAmelCase , beta=0.2_5 , remap=__lowerCAmelCase , sane_index_shape=__lowerCAmelCase )
        A__ = nn.Conv2d(__lowerCAmelCase , __lowerCAmelCase , 1 )
# pass init params to Decoder
A__ = Decoder(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , up_block_types=__lowerCAmelCase , block_out_channels=__lowerCAmelCase , layers_per_block=__lowerCAmelCase , act_fn=__lowerCAmelCase , norm_num_groups=__lowerCAmelCase , norm_type=__lowerCAmelCase , )
@apply_forward_hook
def a_ ( self : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : bool = True ) -> VQEncoderOutput:
"""simple docstring"""
A__ = self.encoder(__lowerCAmelCase )
A__ = self.quant_conv(__lowerCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__lowerCAmelCase )
@apply_forward_hook
def a_ ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if not force_not_quantize:
A__ , A__ , A__ = self.quantize(__lowerCAmelCase )
else:
A__ = h
A__ = self.post_quant_conv(__lowerCAmelCase )
A__ = self.decoder(__lowerCAmelCase , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCAmelCase )
def a_ ( self : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
A__ = sample
A__ = self.encode(__lowerCAmelCase ).latents
A__ = self.decode(__lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCAmelCase )
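# A minimal round-trip sketch (illustrative; the second class above mirrors
# diffusers' `VQModel`, and the tensor sizes are assumed):
#
#   import torch
#   vq = A()                            # default config: 3-channel images
#   out = vq(torch.randn(1, 3, 32, 32)) # encode -> quantize -> decode
#   out.sample.shape                    # torch.Size([1, 3, 32, 32])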
| 365 |
def __lowerCamelCase ( discount_rate :float , cash_flows :list[float] ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError("""Discount rate cannot be negative""" )
if not cash_flows:
raise ValueError("""Cash flows list cannot be empty""" )
A__ = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(__a ) )
return round(__a , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
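# Worked example (the figures are assumed): a project costing 1000 today that
# returns 600 at the end of each of the next two years, discounted at 10%:
#
#   __lowerCamelCase(0.10, [-1000.0, 600.0, 600.0])
#   # -1000 + 600/1.1 + 600/1.21 = 41.32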
| 276 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ,a_=13 ,a_=7 ,a_=True ,a_=True ,a_=True ,a_=True ,a_=99 ,a_=64 ,a_=5 ,a_=4 ,a_=37 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=512 ,a_=16 ,a_=2 ,a_=0.02 ,a_=3 ,a_=4 ,a_=None ,) -> Tuple:
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Dict = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : List[Any] = is_training
_UpperCAmelCase : List[Any] = use_input_mask
_UpperCAmelCase : Any = use_token_type_ids
_UpperCAmelCase : Optional[int] = use_labels
_UpperCAmelCase : List[Any] = vocab_size
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : List[Any] = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : str = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = max_position_embeddings
_UpperCAmelCase : Union[str, Any] = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Any = num_labels
_UpperCAmelCase : int = num_choices
_UpperCAmelCase : Union[str, Any] = scope
_UpperCAmelCase : Union[str, Any] = vocab_size - 1
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCAmelCase : Any = None
if self.use_input_mask:
_UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
if self.use_labels:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_UpperCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def _snake_case ( self ) -> List[str]:
return GPTNeoXConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=a_ ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,)
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_UpperCAmelCase : Any = True
return config, input_ids, input_mask, token_labels
def _snake_case ( self ,a_ ,a_ ,a_ ) -> str:
_UpperCAmelCase : Any = GPTNeoXModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Tuple = model(a_ ,attention_mask=a_ )
_UpperCAmelCase : Optional[int] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self ,a_ ,a_ ,a_ ) -> str:
_UpperCAmelCase : Dict = True
_UpperCAmelCase : str = GPTNeoXModel(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Any = model(a_ ,attention_mask=a_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ) -> Tuple:
_UpperCAmelCase : Union[str, Any] = GPTNeoXForCausalLM(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Dict = model(a_ ,attention_mask=a_ ,labels=a_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ) -> List[Any]:
_UpperCAmelCase : List[str] = self.num_labels
_UpperCAmelCase : int = GPTNeoXForQuestionAnswering(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : str = model(a_ ,attention_mask=a_ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ) -> List[Any]:
_UpperCAmelCase : Tuple = self.num_labels
_UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_UpperCAmelCase : int = model(a_ ,attention_mask=a_ ,labels=a_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ) -> Union[str, Any]:
_UpperCAmelCase : int = self.num_labels
_UpperCAmelCase : str = GPTNeoXForTokenClassification(a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Any = model(a_ ,attention_mask=a_ ,labels=a_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self ,a_ ,a_ ,a_ ) -> List[Any]:
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : int = GPTNeoXForCausalLM(config=a_ )
model.to(a_ )
model.eval()
# first forward pass
_UpperCAmelCase : List[str] = model(a_ ,attention_mask=a_ ,use_cache=a_ )
_UpperCAmelCase : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
_UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
_UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
_UpperCAmelCase : Tuple = torch.cat([input_mask, next_mask] ,dim=-1 )
_UpperCAmelCase : Dict = model(a_ ,attention_mask=a_ ,output_hidden_states=a_ )
_UpperCAmelCase : List[str] = output_from_no_past["""hidden_states"""][0]
_UpperCAmelCase : Optional[Any] = model(
a_ ,attention_mask=a_ ,past_key_values=a_ ,output_hidden_states=a_ ,)["""hidden_states"""][0]
# select random slice
_UpperCAmelCase : Union[str, Any] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ ,a_ ,atol=1E-3 ) )
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = config_and_inputs
_UpperCAmelCase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
UpperCAmelCase = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Union[str, Any] = GPTNeoXModelTester(self )
_UpperCAmelCase : Tuple = ConfigTester(self ,config_class=a_ ,hidden_size=64 ,num_attention_heads=8 )
def _snake_case ( self ) -> int:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Any:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a_ ,a_ ,a_ )
def _snake_case ( self ) -> int:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(a_ ,a_ ,a_ )
def _snake_case ( self ) -> int:
# This regression test was failing with PyTorch < 1.3
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
_UpperCAmelCase : List[Any] = None
self.model_tester.create_and_check_model_as_decoder(a_ ,a_ ,a_ )
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(a_ ,a_ ,a_ )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*a_ )
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _snake_case ( self ) -> str:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _snake_case ( self ,a_ ) -> List[str]:
_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Optional[Any] = ids_tensor([1, 10] ,config.vocab_size )
_UpperCAmelCase : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_UpperCAmelCase : str = GPTNeoXModel(a_ )
original_model.to(a_ )
original_model.eval()
_UpperCAmelCase : str = original_model(a_ ).last_hidden_state
_UpperCAmelCase : List[Any] = original_model(a_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_UpperCAmelCase : List[str] = {"""type""": scaling_type, """factor""": 10.0}
_UpperCAmelCase : Optional[int] = GPTNeoXModel(a_ )
scaled_model.to(a_ )
scaled_model.eval()
_UpperCAmelCase : Union[str, Any] = scaled_model(a_ ).last_hidden_state
_UpperCAmelCase : int = scaled_model(a_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a_ ,a_ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a_ ,a_ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a_ ,a_ ,atol=1E-5 ) )
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
_UpperCAmelCase : Optional[int] = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(a_ )
_UpperCAmelCase : Optional[int] = tokenizer("""My favorite food is""" ,return_tensors="""pt""" ).to(a_ )
        # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
_UpperCAmelCase : Dict = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
_UpperCAmelCase : List[str] = model.generate(**a_ ,do_sample=a_ ,max_new_tokens=20 )
_UpperCAmelCase : List[Any] = tokenizer.batch_decode(a_ )[0]
self.assertEqual(a_ ,a_ )
| 215 |
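# A minimal, self-contained sketch (tiny illustrative config sizes, not the test
# suite's) of the cache-consistency property the tests above verify: a forward
# pass fed `past_key_values` must match a forward pass over the full sequence.
import torch
from transformers import GPTNeoXConfig, GPTNeoXForCausalLM

tiny_config = GPTNeoXConfig(
    vocab_size=128, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=64,
)
tiny_model = GPTNeoXForCausalLM(tiny_config).eval()
prefix = torch.randint(0, 128, (1, 8))
suffix = torch.randint(0, 128, (1, 3))
with torch.no_grad():
    full_logits = tiny_model(torch.cat([prefix, suffix], dim=-1)).logits[:, -3:]
    cache = tiny_model(prefix, use_cache=True).past_key_values
    cached_logits = tiny_model(suffix, past_key_values=cache).logits
assert torch.allclose(full_logits, cached_logits, atol=1e-3)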
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
 '''simple docstring'''
 config = RemBertConfig.from_json_file(rembert_config_file )
 print("""Building PyTorch model from configuration: {}""".format(str(config ) ) )
 model = RemBertModel(config )
 # Load weights from tf checkpoint
 load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
 # Save the PyTorch model
 print("""Save PyTorch model to {}""".format(pytorch_dump_path ) )
 torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A_ : Any = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 215 | 1 |
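# Hypothetical invocation of the conversion script above (file name and all
# paths are placeholders, matching the required arguments it declares):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --rembert_config_file /path/to/rembert_config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin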
'''simple docstring'''
def lowerCamelCase__ ( x_points : list , y_points : list , xa : int ):
 '''simple docstring'''
 n = len(x_points )
 q = [[0] * n for i in range(n )]
 for i in range(n ):
  q[i][1] = y_points[i]
 for i in range(2 , n ):
  for j in range(i , n ):
   q[j][i] = (
    (xa - x_points[j - i + 1]) * q[j][i - 1]
    - (xa - x_points[j]) * q[j - 1][i - 1]
   ) / (x_points[j] - x_points[j - i + 1])
 return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242 |
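# Quick check of the interpolator above on linear data y = x + 5: the
# interpolated value at x = 5 must be exactly 10.
value , table = lowerCamelCase__([1, 2, 3, 4] , [6, 7, 8, 9] , 5 )
assert value == 10.0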
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left ( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ):
 '''simple docstring'''
 if hi < 0:
  hi = len(sorted_collection )
 while lo < hi:
  mid = lo + (hi - lo) // 2
  if sorted_collection[mid] < item:
   lo = mid + 1
  else:
   hi = mid
 return lo
def bisect_right ( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ):
 '''simple docstring'''
 if hi < 0:
  hi = len(sorted_collection )
 while lo < hi:
  mid = lo + (hi - lo) // 2
  if sorted_collection[mid] <= item:
   lo = mid + 1
  else:
   hi = mid
 return lo
def insort_left ( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ):
 '''simple docstring'''
 sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right ( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ):
 '''simple docstring'''
 sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search ( sorted_collection : list[int] , item : int ):
 '''simple docstring'''
 left = 0
 right = len(sorted_collection ) - 1
 while left <= right:
  midpoint = left + (right - left) // 2
  current_item = sorted_collection[midpoint]
  if current_item == item:
   return midpoint
  elif item < current_item:
   right = midpoint - 1
  else:
   left = midpoint + 1
 return None
def binary_search_std_lib ( sorted_collection : list[int] , item : int ):
 '''simple docstring'''
 index = bisect.bisect_left(sorted_collection , item )
 if index != len(sorted_collection ) and sorted_collection[index] == item:
  return index
 return None
def binary_search_by_recursion ( sorted_collection : list[int] , item : int , left : int , right : int ):
 '''simple docstring'''
 if right < left:
  return None
 midpoint = left + (right - left) // 2
 if sorted_collection[midpoint] == item:
  return midpoint
 elif sorted_collection[midpoint] > item:
  return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
 else:
  return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
 user_input = input('Enter numbers separated by comma:\n').strip()
 collection = sorted(int(item) for item in user_input.split(','))
 target = int(input('Enter a single number to be found in the list:\n'))
 result = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
| 242 | 1 |
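# Quick sanity checks for the search helpers above, run on a small sorted list.
data = [0, 5, 7, 10, 15]
assert binary_search(data , 10 ) == 3
assert binary_search_std_lib(data , 6 ) is None
insort_left(data , 6 )
assert data == [0, 5, 6, 7, 10, 15]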
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode :
    """simple docstring"""
    data : int
    left : TreeNode | None = None
    right : TreeNode | None = None
CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
def lowercase_ ( root ):
    if root is None:
        return 0
    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("""The nodes number should be same as the number of coins""" )
    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 ,1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves ,result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 |
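# Worked example for the distributor above: the root holds 3 coins and both
# leaves hold none, so one coin moves down each edge -> 2 moves in total.
example_tree = TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) )
assert lowercase_(example_tree ) == 2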
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_dpt'''] = ['''DPTFeatureExtractor''']
    _import_structure['''image_processing_dpt'''] = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_dpt'''] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 8 | 0 |
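# Minimal stand-in illustrating the lazy-import pattern used above: attribute
# access triggers the real import. This sketch is simplified and is not the
# actual transformers._LazyModule implementation.
import importlib
import types

class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the module that provides it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # the real module is imported only on first attribute access
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy_math = SimpleLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
assert lazy_math.sqrt(9) == 3.0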
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """align_text_model"""
def __init__( self : List[Any] , UpperCamelCase__ : Dict=3_0_5_2_2 , UpperCamelCase__ : Any=7_6_8 , UpperCamelCase__ : List[str]=1_2 , UpperCamelCase__ : Union[str, Any]=1_2 , UpperCamelCase__ : Dict=3_0_7_2 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : List[Any]=5_1_2 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=0.0_2 , UpperCamelCase__ : List[Any]=1E-1_2 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : Optional[int]="absolute" , UpperCamelCase__ : Optional[int]=True , **UpperCamelCase__ : Dict , ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = pad_token_id
@classmethod
def A ( cls : List[str] , UpperCamelCase__ : Union[str, os.PathLike] , **UpperCamelCase__ : Dict ):
"""simple docstring"""
cls._set_token_in_kwargs(UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
UpperCamelCase = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """align_vision_model"""
def __init__( self : int , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 6_0_0 , UpperCamelCase__ : float = 2.0 , UpperCamelCase__ : float = 3.1 , UpperCamelCase__ : int = 8 , UpperCamelCase__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCamelCase__ : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , UpperCamelCase__ : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , UpperCamelCase__ : List[int] = [] , UpperCamelCase__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCamelCase__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCamelCase__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCamelCase__ : float = 0.2_5 , UpperCamelCase__ : str = "swish" , UpperCamelCase__ : int = 2_5_6_0 , UpperCamelCase__ : str = "mean" , UpperCamelCase__ : float = 0.0_2 , UpperCamelCase__ : float = 0.0_0_1 , UpperCamelCase__ : float = 0.9_9 , UpperCamelCase__ : float = 0.2 , **UpperCamelCase__ : Optional[int] , ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = width_coefficient
UpperCamelCase = depth_coefficient
UpperCamelCase = depth_divisor
UpperCamelCase = kernel_sizes
UpperCamelCase = in_channels
UpperCamelCase = out_channels
UpperCamelCase = depthwise_padding
UpperCamelCase = strides
UpperCamelCase = num_block_repeats
UpperCamelCase = expand_ratios
UpperCamelCase = squeeze_expansion_ratio
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dim
UpperCamelCase = pooling_type
UpperCamelCase = initializer_range
UpperCamelCase = batch_norm_eps
UpperCamelCase = batch_norm_momentum
UpperCamelCase = drop_connect_rate
UpperCamelCase = sum(UpperCamelCase__ ) * 4
@classmethod
def A ( cls : Union[str, Any] , UpperCamelCase__ : Union[str, os.PathLike] , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
cls._set_token_in_kwargs(UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
UpperCamelCase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """align"""
_SCREAMING_SNAKE_CASE = True
def __init__( self : Union[str, Any] , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[int]=6_4_0 , UpperCamelCase__ : Optional[Any]=1.0 , UpperCamelCase__ : Tuple=0.0_2 , **UpperCamelCase__ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
if text_config is None:
UpperCamelCase = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
UpperCamelCase = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
UpperCamelCase = AlignTextConfig(**UpperCamelCase__ )
UpperCamelCase = AlignVisionConfig(**UpperCamelCase__ )
UpperCamelCase = projection_dim
UpperCamelCase = temperature_init_value
UpperCamelCase = initializer_range
@classmethod
def A ( cls : Optional[Any] , UpperCamelCase__ : AlignTextConfig , UpperCamelCase__ : AlignVisionConfig , **UpperCamelCase__ : Any ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = copy.deepcopy(self.__dict__ )
UpperCamelCase = self.text_config.to_dict()
UpperCamelCase = self.vision_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output
| 352 |
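# Hedged usage sketch against the real `transformers` ALIGN classes; the
# mangled classmethod `A` above corresponds to `from_text_vision_configs`,
# and the small hyperparameters here are illustrative only.
from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_cfg = AlignTextConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
vision_cfg = AlignVisionConfig(image_size=224)
cfg = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg)
assert cfg.text_config.hidden_size == 256
assert cfg.model_type == "align"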
'''simple docstring'''
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
f''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(f'''Total reward: {total_reward}''')
| 249 | 0 |
def decimal_to_fraction ( decimal :int | float | str )->tuple[int, int]:
    '''simple docstring'''
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError("Please enter a valid number" )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split("." )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        divisor , dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''')
| 159 |
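# Cross-check of decimal_to_fraction above against the standard library's
# exact Fraction type.
from fractions import Fraction

assert decimal_to_fraction("6.25" ) == (25, 4)
assert Fraction("6.25" ) == Fraction(25 , 4 )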
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__( self : Optional[Any] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[int, float] = 1 / 2_5_5 , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : bool = True , **_lowerCAmelCase : int , ) -> None:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
snake_case_ = size if size is not None else {"shortest_edge": 2_2_4}
snake_case_ = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
snake_case_ = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
snake_case_ = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase , param_name="crop_size" )
snake_case_ = do_resize
snake_case_ = size
snake_case_ = resample
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case_ = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case_ = do_convert_rgb
def lowerCAmelCase__ ( self : Dict , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
snake_case_ = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
snake_case_ = get_resize_output_image_size(_lowerCAmelCase , size=size["shortest_edge"] , default_to_square=_lowerCAmelCase )
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : List[Any] , ) -> np.ndarray:
"""simple docstring"""
snake_case_ = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[int, float] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : str , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : ImageInput , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : int = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : float = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowerCAmelCase : Optional[int] , ) -> PIL.Image.Image:
"""simple docstring"""
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(_lowerCAmelCase , param_name="size" , default_to_square=_lowerCAmelCase )
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ = crop_size if crop_size is not None else self.crop_size
snake_case_ = get_size_dict(_lowerCAmelCase , param_name="crop_size" , default_to_square=_lowerCAmelCase )
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(_lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
snake_case_ = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
snake_case_ = [self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images]
if do_rescale:
snake_case_ = [self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
snake_case_ = [self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
snake_case_ = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
snake_case_ = {"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
| 159 | 1 |
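# A numpy re-enactment of the resize -> center-crop -> rescale -> normalize
# pipeline implemented above, on a synthetic image (assumes Pillow and numpy;
# the mean/std constants are the OpenAI CLIP values the snippet imports).
import numpy as np
from PIL import Image

img = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
resized = img.resize((299, 224))            # shortest edge 480 -> 224, so 640 -> ~299
left = (299 - 224) // 2
cropped = resized.crop((left, 0, left + 224, 224))        # 224x224 center crop
arr = np.asarray(cropped).astype(np.float32) / 255.0      # rescale to [0, 1]
mean = np.array([0.48145466, 0.4578275, 0.40821073])      # OPENAI_CLIP_MEAN
std = np.array([0.26862954, 0.26130258, 0.27577711])      # OPENAI_CLIP_STD
pixels = ((arr - mean) / std).transpose(2, 0, 1)          # HWC -> CHW
assert pixels.shape == (3, 224, 224)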
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 369 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( electron_conc : float , hole_conc : float , intrinsic_conc : float , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 0 |
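# Numeric sanity check of the mass-action law n * p = n_i**2 encoded above,
# using illustrative silicon-like magnitudes (cm^-3).
assert _SCREAMING_SNAKE_CASE(0 , 1.0e16 , 1.5e10 ) == ("electron_conc", (1.5e10) ** 2 / 1.0e16)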
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3_2 , _a=3 , _a=4 , _a=[1_0, 2_0, 3_0, 4_0] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=3_7 , _a="gelu" , _a=1_0 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ) -> Tuple:
_a : Any = parent
_a : List[str] = batch_size
_a : Optional[int] = image_size
_a : Dict = num_channels
_a : str = num_stages
_a : int = hidden_sizes
_a : int = depths
_a : int = is_training
_a : str = use_labels
_a : Tuple = intermediate_size
_a : int = hidden_act
_a : List[Any] = num_labels
_a : List[str] = initializer_range
_a : Tuple = out_features
_a : Dict = out_indices
_a : Any = scope
def __lowercase ( self ) -> List[Any]:
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = None
if self.use_labels:
_a : int = ids_tensor([self.batch_size] , self.num_labels )
_a : Any = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> Dict:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __lowercase ( self , _a , _a , _a ) -> int:
_a : int = ConvNextVaModel(config=_a )
model.to(_a )
model.eval()
_a : Union[str, Any] = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __lowercase ( self , _a , _a , _a ) -> List[Any]:
_a : Union[str, Any] = ConvNextVaForImageClassification(_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self , _a , _a , _a ) -> Tuple:
_a : Dict = ConvNextVaBackbone(config=_a )
model.to(_a )
model.eval()
_a : str = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a : Dict = None
_a : int = ConvNextVaBackbone(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __lowercase ( self ) -> Dict:
_a : List[Any] = self.prepare_config_and_inputs()
_a , _a , _a : List[Any] = config_and_inputs
_a : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.prepare_config_and_inputs()
_a , _a , _a : Union[str, Any] = config_and_inputs
_a : List[str] = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : int = False
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Optional[int] = False
def __lowercase ( self ) -> Tuple:
_a : Any = ConvNextVaModelTester(self )
_a : str = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=3_7 )
def __lowercase ( self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self ) -> Optional[int]:
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def __lowercase ( self ) -> List[Any]:
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def __lowercase ( self ) -> Optional[int]:
pass
def __lowercase ( self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : str = True
if model_class.__name__ in [
*get_values(_a ),
*get_values(_a ),
]:
continue
_a : Tuple = model_class(_a )
model.to(_a )
model.train()
_a : str = self._prepare_for_class(_a , _a , return_labels=_a )
_a : Tuple = model(**_a ).loss
loss.backward()
def __lowercase ( self ) -> Optional[int]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Optional[int] = False
_a : Union[str, Any] = True
if (
model_class.__name__
in [*get_values(_a ), *get_values(_a )]
or not model_class.supports_gradient_checkpointing
):
continue
_a : Any = model_class(_a )
model.to(_a )
model.gradient_checkpointing_enable()
model.train()
_a : Any = self._prepare_for_class(_a , _a , return_labels=_a )
_a : str = model(**_a ).loss
loss.backward()
def __lowercase ( self ) -> Tuple:
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_a )
_a : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[Any] = [*signature.parameters.keys()]
_a : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> Any:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Any:
def check_hidden_states_output(_a , _a , _a ):
_a : List[Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Dict = model(**self._prepare_for_class(_a , _a ) )
_a : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[int] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> Dict:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> List[str]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Optional[Any] = ConvNextVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> Any:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> List[Any]:
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Optional[int]:
_a : Any = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(_a )
_a : Optional[int] = self.default_image_processor
_a : Optional[Any] = prepare_img()
_a : Any = preprocessor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a )
# verify the logits
_a : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : Optional[Any] = torch.tensor([0.9996, 0.1966, -0.4386] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 235 |
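# Minimal shape check mirroring the model tester above (assumes a transformers
# version that ships ConvNeXt V2; the tiny hidden sizes are illustrative).
import torch
from transformers import ConvNextV2Config, ConvNextV2Model

small_config = ConvNextV2Config(num_channels=3, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 1, 1])
small_model = ConvNextV2Model(small_config).eval()
with torch.no_grad():
    out = small_model(torch.randn(1, 3, 32, 32))
# the last hidden state is downsampled by 32 in each spatial dimension
assert out.last_hidden_state.shape == (1, 64, 1, 1)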
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ = logging.get_logger(__name__)
a__ = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase , __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = "bit"
UpperCAmelCase__ : Optional[int] = ["preactivation", "bottleneck"]
UpperCAmelCase__ : Optional[Any] = ["SAME", "VALID"]
def __init__( self , _a=3 , _a=6_4 , _a=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , _a=[3, 4, 6, 3] , _a="preactivation" , _a="relu" , _a=None , _a=3_2 , _a=0.0 , _a=False , _a=3_2 , _a=1 , _a=None , _a=None , **_a , ) -> Union[str, Any]:
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_a : Any = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_a : Optional[int] = num_channels
_a : List[Any] = embedding_size
_a : Any = hidden_sizes
_a : int = depths
_a : Dict = layer_type
_a : int = hidden_act
_a : Optional[Any] = global_padding
_a : Optional[Any] = num_groups
_a : Union[str, Any] = drop_path_rate
_a : Tuple = embedding_dynamic_padding
_a : Union[str, Any] = output_stride
_a : Any = width_factor
_a : Any = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(_a ) + 1 )]
_a , _a : List[str] = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
| 235 | 1 |
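# Hedged instantiation sketch for the backbone config above (the real class is
# transformers.BitConfig): out_features are aligned to stage indices.
from transformers import BitConfig

bit_cfg = BitConfig(out_features=["stage1", "stage3"])
assert list(bit_cfg.out_indices) == [1, 3]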
import os
from pathlib import Path
def lowerCamelCase__ ( ) -> Optional[Any]:
from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
UpperCamelCase_ = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , _a , with_cuda=_a , extra_include_paths=[str(_a )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 365 |
from __future__ import annotations
def lowerCamelCase__ ( a__ : list[list[int]] ) -> int:
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(a__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(a__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 261 | 0 |
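# Classic worked example for the routine above (LeetCode 64 numbers): the
# cheapest right/down path 1 -> 3 -> 1 -> 1 -> 1 costs 7.
assert lowerCamelCase__([[1, 3, 1], [1, 5, 1], [4, 2, 1]] ) == 7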
"""simple docstring"""
import functools
def lowerCAmelCase__ ( days , costs ):
    '''simple docstring'''
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("""The parameter days should be a list of integers""" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("""The parameter costs should be a list of three integers""" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("""All days elements should be greater than 0""" )
    if max(days ) >= 3_6_6:
        raise ValueError("""All days elements should be less than 366""" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index ) -> int:
        if index > 3_6_5:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 |
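# Worked check of the ticket DP above (LeetCode 983 example): a 7-day pass on
# day 1 (cost 7) plus 1-day passes on days 8 and 20 (cost 2 each) gives 11.
assert lowerCAmelCase__([1, 4, 6, 7, 8, 20] , [2, 7, 15] ) == 11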
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : int = '''mvp'''
UpperCamelCase : Union[str, Any] = ['''past_key_values''']
UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCAmelCase__ : List[str]=50267 , UpperCAmelCase__ : Optional[Any]=1024 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[Any]=4096 , UpperCAmelCase__ : int=16 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=4096 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Tuple=0.0_2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=100 , UpperCAmelCase__ : Union[str, Any]=800 , **UpperCAmelCase__ : Dict , ) -> List[Any]:
_a : Any = vocab_size
_a : Any = max_position_embeddings
_a : Union[str, Any] = d_model
_a : List[str] = encoder_ffn_dim
_a : List[Any] = encoder_layers
_a : Dict = encoder_attention_heads
_a : Tuple = decoder_ffn_dim
_a : List[Any] = decoder_layers
_a : Optional[Any] = decoder_attention_heads
_a : Optional[Any] = dropout
_a : str = attention_dropout
_a : Dict = activation_dropout
_a : Any = activation_function
_a : Tuple = init_std
_a : Dict = encoder_layerdrop
_a : Optional[int] = decoder_layerdrop
_a : Optional[Any] = classifier_dropout
_a : List[Any] = use_cache
_a : Dict = encoder_layers
_a : str = scale_embedding # scale factor will be sqrt(d_model) if True
_a : int = use_prompt
_a : Dict = prompt_length
_a : Dict = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ):
_a : List[str] = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
| 294 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class __A (snake_case__):
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : List[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = data
def __iter__( self : Union[str, Any] ) ->Any:
"""simple docstring"""
for element in self.data:
yield element
def _a ( _SCREAMING_SNAKE_CASE=True ) -> List[str]:
snake_case_ = Accelerator(even_batches=_SCREAMING_SNAKE_CASE )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) -> Dict:
if iterable:
snake_case_ = DummyIterableDataset(torch.as_tensor(range(_SCREAMING_SNAKE_CASE ) ) )
else:
snake_case_ = TensorDataset(torch.as_tensor(range(_SCREAMING_SNAKE_CASE ) ) )
snake_case_ = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
snake_case_ = accelerator.prepare(_SCREAMING_SNAKE_CASE )
return dl
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Optional[int]:
snake_case_ = create_dataloader(accelerator=_SCREAMING_SNAKE_CASE , dataset_size=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
snake_case_ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _a ( ) -> Optional[int]:
snake_case_ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
_SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _a ( ) -> List[Any]:
snake_case_ = create_accelerator(even_batches=_SCREAMING_SNAKE_CASE )
verify_dataloader_batch_sizes(
_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
_SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _a ( ) -> Dict:
snake_case_ = create_accelerator(even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ = torch.nn.Linear(1 , 1 )
snake_case_ = accelerator.prepare(_SCREAMING_SNAKE_CASE )
snake_case_ = create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
snake_case_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = ddp_model(batch[0].float() )
snake_case_ = output.sum()
loss.backward()
batch_idxs.append(_SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
with warnings.catch_warnings(record=_SCREAMING_SNAKE_CASE ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , _SCREAMING_SNAKE_CASE )
assert "only supported for multi-GPU" in str(w[-1].message )
def _a ( ) -> Any:
snake_case_ = True
snake_case_ = False
snake_case_ = create_accelerator(even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ = torch.nn.Linear(1 , 1 )
snake_case_ = accelerator.prepare(_SCREAMING_SNAKE_CASE )
snake_case_ = create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
snake_case_ = create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_SCREAMING_SNAKE_CASE ):
snake_case_ = train_dl.batch_sampler.even_batches
snake_case_ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _a ( ) -> Optional[Any]:
snake_case_ = True
snake_case_ = False
snake_case_ = create_accelerator(even_batches=_SCREAMING_SNAKE_CASE )
snake_case_ = torch.nn.Linear(1 , 1 )
snake_case_ = accelerator.prepare(_SCREAMING_SNAKE_CASE )
create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=_SCREAMING_SNAKE_CASE )
snake_case_ = create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("""ignore""" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_SCREAMING_SNAKE_CASE ):
snake_case_ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _a ( ) -> Dict:
snake_case_ = create_accelerator()
snake_case_ = torch.nn.Linear(1 , 1 )
snake_case_ = accelerator.prepare(_SCREAMING_SNAKE_CASE )
create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=_SCREAMING_SNAKE_CASE )
with warnings.catch_warnings(record=_SCREAMING_SNAKE_CASE ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_SCREAMING_SNAKE_CASE ):
pass
assert issubclass(w[-1].category , _SCREAMING_SNAKE_CASE )
assert "only supported for map-style datasets" in str(w[-1].message )
def _a ( ) -> Dict:
snake_case_ = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
snake_case_ = accelerator.state.distributed_type
snake_case_ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(_SCREAMING_SNAKE_CASE )
snake_case_ = original_state
if __name__ == "__main__":
main()
| 233 |
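# Hypothetical two-process launch for the test script above (it asserts that
# exactly two GPUs are available; the file name is a placeholder):
# accelerate launch --num_processes 2 test_even_batches.py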
"""simple docstring"""
def bubble_sort ( list_data : list , length : int = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i] , list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 233 | 1 |
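# Quick sanity check for the recursive bubble sort above.
assert bubble_sort([5, 2, 9, 1] ) == [1, 2, 5, 9]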
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
'''simple docstring'''
    def __init__( self , poly_a=None , poly_b=None ):
        """simple docstring"""
        self.polyA = list(poly_a or [0] )[:]
        self.polyB = list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        self.product = self.__multiply()
def _lowercase ( self : int , UpperCamelCase__ : Any ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(UpperCamelCase__ ) <= 1:
return dft[0]
#
__magic_name__ = self.c_max_length // 2
while next_ncol > 0:
__magic_name__ = [[] for i in range(UpperCamelCase__ )]
__magic_name__ = self.root**next_ncol
# First half of next step
__magic_name__ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCamelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
__magic_name__ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCamelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
__magic_name__ = new_dft
__magic_name__ = next_ncol // 2
return dft[0]
def _lowercase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.__dft("""A""" )
__magic_name__ = self.__dft("""B""" )
__magic_name__ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
__magic_name__ = 2
while next_ncol <= self.c_max_length:
__magic_name__ = [[] for i in range(UpperCamelCase__ )]
__magic_name__ = self.root ** (next_ncol // 2)
__magic_name__ = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
__magic_name__ = new_inverse_c
next_ncol *= 2
# Unpack
__magic_name__ = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__magic_name__ = """A = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
__magic_name__ = """B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
__magic_name__ = """A*B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
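# Quick usage sketch (product coefficients come back as complex numbers rounded to 8 decimals):
#
#     product = FFT(poly_a=[1, 2, 3], poly_b=[3, 4]).product
#     # (1 + 2x + 3x^2) * (3 + 4x) = 3 + 10x + 17x^2 + 12x^3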
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
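    # Effect in practice (a summary of the lazy-module pattern, not extra API):
    # `from transformers.models.mra import MraModel` now defers importing the
    # torch-backed module until the attribute is first accessed.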
| 115 | 0 |
"""simple docstring"""
def merge_sort(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        def _merge():
            # yield the smaller head element until one side is exhausted
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
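# Doctest-style examples (consistent with the implementation above):
# >>> merge_sort([0, 5, 3, 2, 2])
# [0, 2, 2, 3, 5]
# >>> merge_sort([-2, -5, -45])
# [-45, -5, -2]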
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=""",""")
| 80 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
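# Note on the pmap idiom used in both tests (a summary of the calls above, not extra API):
# `replicate(params)` copies the weights to every device, `shard(...)` splits the
# batch across devices, and `jit=True` dispatches the pipeline through `pmap`,
# which is why `images` carries a leading `jax.device_count()` axis.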
| 80 | 1 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
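# A note on the helper above: assigning `emb.weight.data` (shape vocab x emb) into
# the layer makes it compute hidden @ emb.weight.T, i.e. an lm_head tied to the
# shared embeddings; it is used below for BartForConditionalGeneration checkpoints.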
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak the fairseq model's weights to our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 6 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum contiguous subarray of arr[low..high] (inclusive)."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)
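# Quick example (indices returned are inclusive):
#
#     nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
#     low, high, best = max_subarray(nums, 0, len(nums) - 1)
#     # -> (3, 6, 6): the slice nums[3:7] == [4, -1, 2, 1] sums to 6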
def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def benchmark() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 6 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a random PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
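# End-to-end sketch of the processor under test (the checkpoint name is an assumption
# taken from the MGP-STR documentation, not something these tests depend on):
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)
#     generated_text = processor.batch_decode(outputs.logits)["generated_text"]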
| 366 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is [eos] or [pad], else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
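    # Resulting layout, as a sketch (token ids illustrative):
    #   build_inputs_with_special_tokens([5, 6, 7])       -> [5, 6, 7, eos_token_id]
    #   build_inputs_with_special_tokens([5, 6], [7, 8])  -> [5, 6, 7, 8, eos_token_id]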
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 143 | 0 |