code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features(
{
"""answers""": Sequence(
{
"""text""": Value("""string""" ),
"""answer_start""": Value("""int32""" ),
} )
} )
SCREAMING_SNAKE_CASE_ : str = "question"
SCREAMING_SNAKE_CASE_ : str = "context"
SCREAMING_SNAKE_CASE_ : str = "answers"
@property
def __A ( self ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 113 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def lowercase (SCREAMING_SNAKE_CASE_ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE_ : int | float , SCREAMING_SNAKE_CASE_ : int | float , SCREAMING_SNAKE_CASE_ : int = 1_00 , ) -> float:
SCREAMING_SNAKE_CASE = x_start
SCREAMING_SNAKE_CASE = fnc(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = 0.0
for _ in range(SCREAMING_SNAKE_CASE_ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
SCREAMING_SNAKE_CASE = (x_end - x_start) / steps + xa
SCREAMING_SNAKE_CASE = fnc(SCREAMING_SNAKE_CASE_ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
SCREAMING_SNAKE_CASE = xa
SCREAMING_SNAKE_CASE = fxa
return area
if __name__ == "__main__":
def lowercase (SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict:
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
__UpperCamelCase = 10
while i <= 100000:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 113 | 1 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=3 , lowerCAmelCase_=32 , lowerCAmelCase_=3 , lowerCAmelCase_=10 , lowerCAmelCase_=[10, 20, 30, 40] , lowerCAmelCase_=[1, 1, 2, 1] , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=3 , lowerCAmelCase_=None , ) -> Optional[Any]:
_A = parent
_A = batch_size
_A = image_size
_A = num_channels
_A = embeddings_size
_A = hidden_sizes
_A = depths
_A = is_training
_A = use_labels
_A = hidden_act
_A = num_labels
_A = scope
_A = len(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[Any]:
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = self.get_config()
return config, pixel_values
def UpperCAmelCase ( self ) -> Tuple:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_A = FlaxRegNetModel(config=lowerCAmelCase_ )
_A = model(lowerCAmelCase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_A = self.num_labels
_A = FlaxRegNetForImageClassification(config=lowerCAmelCase_ )
_A = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self ) -> Dict:
_A = self.prepare_config_and_inputs()
_A , _A = config_and_inputs
_A = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class a ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Dict = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCamelCase :List[str] = False
lowerCamelCase :Optional[Any] = False
lowerCamelCase :Any = False
def UpperCAmelCase ( self ) -> None:
_A = FlaxRegNetModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self ) -> str:
return
def UpperCAmelCase ( self ) -> Tuple:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase ( self ) -> Optional[Any]:
pass
def UpperCAmelCase ( self ) -> Any:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(lowerCAmelCase_ )
_A = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_A = model_class(lowerCAmelCase_ )
_A = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_A = model_class(lowerCAmelCase_ )
@jax.jit
def model_jitted(lowerCAmelCase_ , **lowerCAmelCase_ ):
return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_ )
with self.subTest("""JIT Enabled""" ):
_A = model_jitted(**lowerCAmelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_A = model_jitted(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case ( ) -> List[str]:
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ) -> Any:
_A = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=lowerCAmelCase_ , return_tensors="""np""" )
_A = model(**lowerCAmelCase_ )
# verify the logits
_A = (1, 10_00)
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_A = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 81 | def snake_case ( snake_case__ :str) -> int:
_A = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""")
_A = hex_num[0] == """-"""
if is_negative:
_A = hex_num[1:]
try:
_A = int(snake_case__ , 16)
except ValueError:
raise ValueError("""Invalid value was passed to the function""")
_A = """"""
while int_num > 0:
_A = str(int_num % 2) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 81 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
lowercase_ = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self : Tuple ) -> Optional[int]:
lowercase_ = None
lowercase_ = 2_0
lowercase_ = self._get_uniform_logits(batch_size=2 , length=SCREAMING_SNAKE_CASE_ )
# tweak scores to not be uniform anymore
lowercase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowercase_ = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowercase_ = jax.nn.softmax(SCREAMING_SNAKE_CASE_ , axis=-1 )
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowercase_ = jax.nn.softmax(temp_dist_warper_sharper(SCREAMING_SNAKE_CASE_ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE_ ) , axis=-1 )
lowercase_ = jax.nn.softmax(temp_dist_warper_smoother(SCREAMING_SNAKE_CASE_ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self : str ) -> Union[str, Any]:
lowercase_ = None
lowercase_ = 1_0
lowercase_ = 2
# create ramp distribution
lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, vocab_size) ).copy()
lowercase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowercase_ = 5
lowercase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, length) ).copy()
lowercase_ = top_k_warp_safety_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self : Dict ) -> List[str]:
lowercase_ = None
lowercase_ = 1_0
lowercase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowercase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
lowercase_ = np.exp(top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowercase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowercase_ = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
lowercase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self : Any ) -> List[str]:
lowercase_ = 2_0
lowercase_ = 4
lowercase_ = 0
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ )
# check that min length is applied at length 5
lowercase_ = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
lowercase_ = 5
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = min_dist_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = 1_5
lowercase_ = min_dist_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() )
def _lowercase ( self : Optional[Any] ) -> int:
lowercase_ = 2_0
lowercase_ = 4
lowercase_ = 0
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ )
# check that all scores are -inf except the bos_token_id score
lowercase_ = ids_tensor((batch_size, 1) , vocab_size=2_0 )
lowercase_ = 1
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowercase_ = 3
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() )
def _lowercase ( self : List[Any] ) -> List[Any]:
lowercase_ = 2_0
lowercase_ = 4
lowercase_ = 0
lowercase_ = 5
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowercase_ = ids_tensor((batch_size, 4) , vocab_size=2_0 )
lowercase_ = 4
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowercase_ = 3
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() )
def _lowercase ( self : List[Any] ) -> List[str]:
lowercase_ = 4
lowercase_ = 1_0
lowercase_ = 1_5
lowercase_ = 2
lowercase_ = 1
lowercase_ = 1_5
# dummy input_ids and scores
lowercase_ = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE_ )
lowercase_ = input_ids.copy()
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = scores.copy()
# instantiate all dist processors
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = 1_0
# no processor list
lowercase_ = temp_dist_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = min_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = bos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = eos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# with processor list
lowercase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase_ = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# scores should be equal
self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self : List[str] ) -> int:
lowercase_ = 4
lowercase_ = 1_0
lowercase_ = 1_5
lowercase_ = 2
lowercase_ = 1
lowercase_ = 1_5
# dummy input_ids and scores
lowercase_ = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE_ )
lowercase_ = input_ids.copy()
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = scores.copy()
# instantiate all dist processors
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = 1_0
# no processor list
def run_no_processor_list(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int ):
lowercase_ = temp_dist_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = min_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = bos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = eos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
return scores
# with processor list
def run_processor_list(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ):
lowercase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase_ = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
return scores
lowercase_ = jax.jit(SCREAMING_SNAKE_CASE_ )
lowercase_ = jax.jit(SCREAMING_SNAKE_CASE_ )
lowercase_ = jitted_run_no_processor_list(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = jitted_run_processor_list(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# scores should be equal
self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 30 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__a = 'sshleifer/bart-tiny-random'
__a = 'patrickvonplaten/t5-tiny-random'
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Any ) -> Tuple:
return AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=SCREAMING_SNAKE_CASE_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowercase ( self : List[Any] ) -> List[Any]:
lowercase_ , *lowercase_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE_ , tempfile.mkdtemp() , e=SCREAMING_SNAKE_CASE_ , d=SCREAMING_SNAKE_CASE_ )
| 30 | 1 |
'''simple docstring'''
def UpperCamelCase_ ( A__ : list ):
'''simple docstring'''
for i in range(len(A__ ) - 1 , 0 , -1 ):
lowerCAmelCase_ : Dict = False
for j in range(A__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowerCAmelCase_, lowerCAmelCase_ : Union[str, Any] = unsorted[j - 1], unsorted[j]
lowerCAmelCase_ : List[Any] = True
for j in range(A__ ):
if unsorted[j] > unsorted[j + 1]:
lowerCAmelCase_, lowerCAmelCase_ : List[Any] = unsorted[j + 1], unsorted[j]
lowerCAmelCase_ : List[Any] = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Dict = input("Enter numbers separated by a comma:\n").strip()
__A : Any = [int(item) for item in user_input.split(",")]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 89 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase_ ( A__ : int | float | str , A__ : int | float | str ):
'''simple docstring'''
if nth_term == "":
return [""]
lowerCAmelCase_ : str = int(A__ )
lowerCAmelCase_ : Tuple = int(A__ )
lowerCAmelCase_ : list[str] = []
for temp in range(int(A__ ) ):
series.append(f'1 / {pow(temp + 1 , int(A__ ) )}' if series else """1""" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : str = int(input("Enter the last number (nth term) of the P-Series"))
__A : Tuple = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 89 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class A ( unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : str )-> Union[str, Any]:
'''simple docstring'''
A__ = load_tool('text-to-speech' )
self.tool.setup()
def snake_case__ ( self : int )-> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = self.tool('hey' )
A__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3],torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ),) )
def snake_case__ ( self : int )-> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = self.tool('hey' )
A__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3],torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ),) )
| 7 |
import math
def lowerCAmelCase__( lowercase : list , lowercase : int = 0 , lowercase : int = 0 ) -> list:
__snake_case : Any = end or len(lowercase )
for i in range(lowercase , lowercase ):
__snake_case : List[str] = i
__snake_case : Union[str, Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
__snake_case : Optional[Any] = array[temp_index - 1]
temp_index -= 1
__snake_case : Any = temp_index_value
return array
def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int ) -> None: # Max Heap
__snake_case : Any = index
__snake_case : Optional[Any] = 2 * index + 1 # Left Node
__snake_case : str = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
__snake_case : Optional[int] = left_index
if right_index < heap_size and array[largest] < array[right_index]:
__snake_case : Tuple = right_index
if largest != index:
__snake_case , __snake_case : int = array[largest], array[index]
heapify(lowercase , lowercase , lowercase )
def lowerCAmelCase__( lowercase : list ) -> list:
__snake_case : List[str] = len(lowercase )
for i in range(n // 2 , -1 , -1 ):
heapify(lowercase , lowercase , lowercase )
for i in range(n - 1 , 0 , -1 ):
__snake_case , __snake_case : Optional[Any] = array[0], array[i]
heapify(lowercase , 0 , lowercase )
return array
def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int , lowercase : int ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int , lowercase : int ) -> int:
__snake_case : Union[str, Any] = low
__snake_case : Union[str, Any] = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
__snake_case , __snake_case : str = array[j], array[i]
i += 1
def lowerCAmelCase__( lowercase : list ) -> list:
if len(lowercase ) == 0:
return array
__snake_case : Union[str, Any] = 2 * math.ceil(math.loga(len(lowercase ) ) )
__snake_case : Dict = 16
return intro_sort(lowercase , 0 , len(lowercase ) , lowercase , lowercase )
def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int , lowercase : int , lowercase : int ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowercase )
max_depth -= 1
__snake_case : List[str] = median_of_a(lowercase , lowercase , start + ((end - start) // 2) + 1 , end - 1 )
__snake_case : Optional[Any] = partition(lowercase , lowercase , lowercase , lowercase )
intro_sort(lowercase , lowercase , lowercase , lowercase , lowercase )
__snake_case : List[str] = p
return insertion_sort(lowercase , lowercase , lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = input('''Enter numbers separated by a comma : ''').strip()
_UpperCamelCase = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 326 | 0 |
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = len(_UpperCAmelCase )
__a = [[0] * n for i in range(_UpperCAmelCase )]
for i in range(_UpperCAmelCase ):
__a = y_points[i]
for i in range(2 , _UpperCAmelCase ):
for j in range(_UpperCAmelCase , _UpperCAmelCase ):
__a = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 131 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _A :
UpperCamelCase__ : int
UpperCamelCase__ : TreeNode | None = None
UpperCamelCase__ : TreeNode | None = None
__snake_case :Optional[Any] = namedtuple('''CoinsDistribResult''', '''moves excess''')
def __snake_case ( _UpperCAmelCase ):
if root is None:
return 0
# Validation
def count_nodes(_UpperCAmelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_UpperCAmelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_UpperCAmelCase ) != count_coins(_UpperCAmelCase ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(_UpperCAmelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__a , __a = get_distrib(node.left )
__a , __a = get_distrib(node.right )
__a = 1 - left_distrib_excess
__a = 1 - right_distrib_excess
__a = (
left_distrib_moves
+ right_distrib_moves
+ abs(_UpperCAmelCase )
+ abs(_UpperCAmelCase )
)
__a = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_UpperCAmelCase , _UpperCAmelCase )
return get_distrib(_UpperCAmelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 131 | 1 |
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of the segment from ``pa`` to ``pb``.

    NOTE: the original declared both parameters as ``A__`` (a SyntaxError)
    and averaged one point with itself.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    """Draw one triangle with the module-level ``my_pen`` and recurse.

    Recurses on the three corner sub-triangles (Sierpinski construction)
    until ``depth`` reaches 0.  NOTE: the recursive calls referenced
    ``triangle``/``get_mid`` while both defs were named ``UpperCamelCase_``
    (the second shadowing the first); the names are restored here.
    """
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexb, vertexa), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexa), get_mid(vertexc, vertexb), depth - 1)


# Backwards-compatible alias for the obfuscated public name this module exposed.
UpperCamelCase_ = triangle
if __name__ == "__main__":
    # CLI entry point: expects exactly one argument, the fractal depth.
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    # NOTE(review): the drawing code reads a module-level pen named
    # ``my_pen`` and a list named ``vertices``, but both are bound to ``__A``
    # here; also ``Union``/``List`` are not imported in this script — confirm
    # against the upstream source.
    __A : Union[str, Any] = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    __A : List[str] = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 120 |
'''simple docstring'''
def UpperCamelCase_(separator: str, separated: list[str]) -> str:
    """Join ``separated`` with ``separator`` and strip separator chars at the ends.

    Raises ``Exception`` if any element is not a string.  NOTE: the original
    declared both parameters as ``A__`` (a SyntaxError); the names here match
    the function's documented contract.
    """
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("""join() accepts only strings to be joined""")
    # str.join is linear; the original built the result with quadratic ``+=``
    # then stripped the trailing separator — ``strip(separator)`` removes any
    # run of separator characters at both ends either way, so behaviour is
    # identical.
    return separator.join(separated).strip(separator)
if __name__ == "__main__":
    # Run the join helper's doctests when executed directly.
    from doctest import testmod
    testmod()
| 120 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
# Checkpoint identifiers and fixture texts used by the dataset tests below.
# NOTE(review): every constant is (re)bound to the single obfuscated name
# ``UpperCamelCase__`` — each assignment shadows the previous one — while the
# test class reads the original names (``BERT_BASE_CASED``, ``PEGASUS_XSUM``,
# ``ARTICLES``, ``SUMMARIES``, ``T5_TINY``, ``BART_TINY``, ``MBART_TINY``,
# ``MARIAN_TINY``).  Restore the names from the upstream source.
UpperCamelCase__ : Dict = """bert-base-cased"""
UpperCamelCase__ : Dict = """google/pegasus-xsum"""
UpperCamelCase__ : Optional[int] = [""" Sam ate lunch today.""", """Sams lunch ingredients."""]
UpperCamelCase__ : Tuple = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
UpperCamelCase__ : str = """patrickvonplaten/t5-tiny-random"""
UpperCamelCase__ : Any = """sshleifer/bart-tiny-random"""
UpperCamelCase__ : Any = """sshleifer/tiny-mbart"""
UpperCamelCase__ : int = """sshleifer/tiny-marian-en-de"""
def SCREAMING_SNAKE_CASE__(snake_case_, articles):
    """Write ``articles`` to the file at path ``snake_case_``, one per line.

    NOTE: the original declared both parameters under the same name (a
    SyntaxError) and annotated the return as the undefined ``Tuple``;
    ``Path.write_text`` also closes the file handle the original leaked.
    """
    Path(snake_case_).write_text('''\n'''.join(articles))
def SCREAMING_SNAKE_CASE__(tmp_dir):
    """Populate ``tmp_dir`` with train/val/test source and target fixture files.

    The parameter is named ``tmp_dir`` because every caller in this module
    invokes it by keyword (``make_test_data_dir(tmp_dir=...)``); the original
    obfuscated name made those calls raise ``TypeError``.  Returns the
    directory path.

    NOTE(review): relies on the ``_dump_articles`` helper and the
    ``ARTICLES``/``SUMMARIES`` fixture constants defined above under their
    original (pre-obfuscation) names — confirm they are in scope.
    """
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"""{split}.source"""), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"""{split}.target"""), SUMMARIES)
    return tmp_dir
class lowerCamelCase_ ( a_ ):
    # Test-suite for SeqaSeqDataset / LegacySeqaSeqDataset truncation,
    # packing, dynamic batching and sampler behaviour across several tiny
    # pretrained checkpoints.
    #
    # NOTE(review): obfuscation damage pervades this class and is kept
    # byte-identical below — every local is assigned to ``a`` (or
    # ``a , a = ...``) while later statements read the original names
    # (``tokenizer``, ``max_src_len``, ``dataloader``, ``trunc_target``,
    # ``ds`` ...); every test method is named ``SCREAMING_SNAKE_CASE_`` so
    # only the last binding survives; one inner ``def`` repeats a parameter
    # name (a SyntaxError); and the module-level fixture constants referenced
    # here were renamed at their definition site.  Restoring it requires the
    # upstream transformers source.
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] ,)
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : str ):
        """Check that SeqaSeqDataset trims sources/targets to the max lengths."""
        a = AutoTokenizer.from_pretrained(__lowerCamelCase )
        a = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        a = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in ARTICLES )
        a = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in SUMMARIES )
        a = 4
        a = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        a , a = '''ro_RO''', '''de_DE'''  # ignored for all but mbart, but never causes error.
        a = SeqaSeqDataset(
            __lowerCamelCase ,data_dir=__lowerCamelCase ,type_path='''train''' ,max_source_length=__lowerCamelCase ,max_target_length=__lowerCamelCase ,src_lang=__lowerCamelCase ,tgt_lang=__lowerCamelCase ,)
        a = DataLoader(__lowerCamelCase ,batch_size=2 ,collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(__lowerCamelCase ,__lowerCamelCase )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            a = shift_tokens_right(batch['''labels'''] ,tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int ):
        """Check that the legacy dataset truncates rather than trims to longest."""
        a = AutoTokenizer.from_pretrained(__lowerCamelCase )
        a = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        a = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in ARTICLES )
        a = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in SUMMARIES )
        a = 4
        a = LegacySeqaSeqDataset(
            __lowerCamelCase ,data_dir=__lowerCamelCase ,type_path='''train''' ,max_source_length=20 ,max_target_length=__lowerCamelCase ,)
        a = DataLoader(__lowerCamelCase ,batch_size=2 ,collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        """Check that pack_data_dir merges short examples into fewer lines."""
        a = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
        a = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        a = tmp_dir.joinpath('''train.source''' ).open().readlines()
        a = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(__lowerCamelCase ,__lowerCamelCase ,1_28 ,__lowerCamelCase )
        a = {x.name for x in tmp_dir.iterdir()}
        a = {x.name for x in save_dir.iterdir()}
        a = save_dir.joinpath('''train.source''' ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(__lowerCamelCase ) < len(__lowerCamelCase )
        assert len(__lowerCamelCase ) == 1
        assert len(packed_examples[0] ) == sum(len(__lowerCamelCase ) for x in orig_examples )
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE ,reason='''This test requires fairseq''' )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        """Check that the dynamic sampler caps token counts per batch."""
        if not FAIRSEQ_AVAILABLE:
            return
        a , a , a = self._get_dataset(max_len=64 )
        a = 64
        a = ds.make_dynamic_sampler(__lowerCamelCase ,required_batch_size_multiple=__lowerCamelCase )
        a = [len(__lowerCamelCase ) for x in batch_sampler]
        assert len(set(__lowerCamelCase ) ) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(__lowerCamelCase ) == len(__lowerCamelCase )  # no dropped or added examples
        a = DataLoader(__lowerCamelCase ,batch_sampler=__lowerCamelCase ,collate_fn=ds.collate_fn ,num_workers=2 )
        a = []
        a = []
        for batch in data_loader:
            a = batch['''input_ids'''].shape
            a = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            a = np.product(batch['''input_ids'''].shape )
            num_src_per_batch.append(__lowerCamelCase )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(__lowerCamelCase )
        assert num_src_per_batch[0] == max(__lowerCamelCase )
        if failures:
            raise AssertionError(F"""too many tokens in {len(__lowerCamelCase )} batches""" )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        """Check that the sortish sampler reduces padding versus naive order."""
        a , a , a = self._get_dataset(max_len=5_12 )
        a = 2
        a = ds.make_sortish_sampler(__lowerCamelCase ,shuffle=__lowerCamelCase )
        a = DataLoader(__lowerCamelCase ,batch_size=__lowerCamelCase ,collate_fn=ds.collate_fn ,num_workers=2 )
        a = DataLoader(__lowerCamelCase ,batch_size=__lowerCamelCase ,collate_fn=ds.collate_fn ,num_workers=2 ,sampler=__lowerCamelCase )
        a = tokenizer.pad_token_id

        # NOTE(review): the inner def below repeats the parameter name
        # ``__lowerCamelCase`` (a SyntaxError in the original).
        def count_pad_tokens(__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[str]="input_ids" ):
            return [batch[k].eq(__lowerCamelCase ).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(__lowerCamelCase ,k='''labels''' ) ) < sum(count_pad_tokens(__lowerCamelCase ,k='''labels''' ) )
        assert sum(count_pad_tokens(__lowerCamelCase ) ) < sum(count_pad_tokens(__lowerCamelCase ) )
        assert len(__lowerCamelCase ) == len(__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : str=10_00 ,__lowerCamelCase : str=1_28 ):
        """Build the (dataset, max_tokens, tokenizer) fixture used by samplers."""
        if os.getenv('''USE_REAL_DATA''' ,__lowerCamelCase ):
            a = '''examples/seq2seq/wmt_en_ro'''
            a = max_len * 2 * 64
            if not Path(__lowerCamelCase ).joinpath('''train.len''' ).exists():
                save_len_file(__lowerCamelCase ,__lowerCamelCase )
        else:
            a = '''examples/seq2seq/test_data/wmt_en_ro'''
            a = max_len * 4
            save_len_file(__lowerCamelCase ,__lowerCamelCase )
        a = AutoTokenizer.from_pretrained(__lowerCamelCase )
        a = SeqaSeqDataset(
            __lowerCamelCase ,data_dir=__lowerCamelCase ,type_path='''train''' ,max_source_length=__lowerCamelCase ,max_target_length=__lowerCamelCase ,n_obs=__lowerCamelCase ,)
        return ds, max_tokens, tokenizer

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        """Check that the two distributed sampler shards do not overlap."""
        a , a , a = self._get_dataset()
        a = set(DistributedSortishSampler(__lowerCamelCase ,2_56 ,num_replicas=2 ,rank=0 ,add_extra_examples=__lowerCamelCase ) )
        a = set(DistributedSortishSampler(__lowerCamelCase ,2_56 ,num_replicas=2 ,rank=1 ,add_extra_examples=__lowerCamelCase ) )
        assert idsa.intersection(__lowerCamelCase ) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ] ,)
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Optional[Any] ):
        """Check per-tokenizer dataset_kwargs (lang codes, add_prefix_space)."""
        a = AutoTokenizer.from_pretrained(__lowerCamelCase ,use_fast=__lowerCamelCase )
        if tok_name == MBART_TINY:
            a = SeqaSeqDataset(
                __lowerCamelCase ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path='''train''' ,max_source_length=4 ,max_target_length=8 ,src_lang='''EN''' ,tgt_lang='''FR''' ,)
            a = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            a = SeqaSeqDataset(
                __lowerCamelCase ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path='''train''' ,max_source_length=4 ,max_target_length=8 ,)
            a = train_dataset.dataset_kwargs
        assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
        assert len(__lowerCamelCase ) == 1 if tok_name == BART_TINY else len(__lowerCamelCase ) == 0
| 330 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the logger and the pretrained-config map are both bound to the
# single obfuscated name ``UpperCamelCase__`` — the second assignment discards
# the logger.  Upstream these are ``logger`` and ``YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP``.
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : Union[str, Any] = {
    """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase_ ( a_ ):
    """Configuration class for YOLOS object-detection models.

    NOTE: the original ``__init__`` declared every parameter under the single
    name ``__lowerCamelCase`` (a SyntaxError) and bound each value to a local
    ``a`` instead of an instance attribute.  The parameter names are recovered
    from the right-hand sides of those assignments and their ordered defaults.
    """

    SCREAMING_SNAKE_CASE_ = 'yolos'  # model_type identifier

    def __init__(
        self,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[5_12, 8_64],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=1_00,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Transformer encoder geometry.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Vision input layout.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Detection head.
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class lowerCamelCase_ ( a_ ):
    # ONNX export configuration for YOLOS.
    # NOTE(review): this class shares its obfuscated name with the config
    # class above (shadowing it at module level), and the class attribute plus
    # all three properties share the name ``SCREAMING_SNAKE_CASE_`` — only the
    # final property binding survives.  Kept byte-identical.

    SCREAMING_SNAKE_CASE_ = version.parse('1.11' )  # minimum torch version for export

    @property
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        """Axis mapping for the exported model's pixel_values input."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        """Absolute tolerance used when validating the ONNX export."""
        return 1e-4

    @property
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        """Default ONNX opset to export with."""
        return 12
| 330 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Force deterministic torch/cudnn behaviour so pixel checksums are reproducible.
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _lowercase , unittest.TestCase ):
    # Fast (CPU, tiny-model) tests for KandinskyVaaControlnetPipeline.
    #
    # NOTE(review): obfuscation damage, kept byte-identical — all class
    # attributes are bound to ``snake_case__`` (each shadowing the previous,
    # leaving only ``False``), every method is named ``SCREAMING_SNAKE_CASE``
    # (only the last binding survives), and ``get_dummy_components`` assigns
    # its locals to ``a_`` while the components dict reads
    # ``unet``/``scheduler``/``movq``.  Restore from the upstream diffusers
    # test file.
    snake_case__ : str = KandinskyVaaControlnetPipeline
    snake_case__ : int = ["image_embeds", "negative_image_embeds", "hint"]
    snake_case__ : Dict = ["image_embeds", "negative_image_embeds", "hint"]
    snake_case__ : Any = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    snake_case__ : Optional[Any] = False

    @property
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
        """Text-embedder hidden size used by the dummy models."""
        return 3_2

    @property
    def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
        """Time-embedding input dimension for the dummy UNet."""
        return 3_2

    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
        return self.time_input_dim

    @property
    def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
        return self.time_input_dim * 4

    @property
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
        """Cross-attention dimension for the dummy UNet."""
        return 1_0_0

    @property
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
        """Build a tiny seeded UNet2DConditionModel for the pipeline."""
        torch.manual_seed(0 )
        a_ : List[str] = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        a_ : Union[str, Any] = UNetaDConditionModel(**__lowerCamelCase )
        return model

    @property
    def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
        """Keyword arguments for the tiny VQ decoder (movq)."""
        return {
            "block_out_channels": [3_2, 3_2, 6_4, 6_4],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
        """Build the tiny seeded VQModel decoder."""
        torch.manual_seed(0 )
        a_ : Optional[int] = VQModel(**self.dummy_movq_kwargs )
        return model

    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
        """Assemble the pipeline components dict (unet, scheduler, movq)."""
        a_ : Tuple = self.dummy_unet
        a_ : Union[str, Any] = self.dummy_movq
        a_ : List[str] = DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=__lowerCamelCase , )
        a_ : Tuple = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=0 ) -> Any:
        """Build seeded pipeline inputs (embeddings, hint image, generator)."""
        a_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
        a_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            __lowerCamelCase )
        # create hint
        a_ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
        if str(__lowerCamelCase ).startswith('mps' ):
            a_ : List[Any] = torch.manual_seed(__lowerCamelCase )
        else:
            a_ : Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
        a_ : List[Any] = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 6_4,
            "width": 6_4,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
        """Run the pipeline on CPU and compare a pixel slice to a golden value."""
        a_ : List[Any] = "cpu"
        a_ : Optional[Any] = self.get_dummy_components()
        a_ : str = self.pipeline_class(**__lowerCamelCase )
        a_ : int = pipe.to(__lowerCamelCase )
        pipe.set_progress_bar_config(disable=__lowerCamelCase )
        a_ : List[str] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
        a_ : List[Any] = output.images
        a_ : str = pipe(
            **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
        a_ : Dict = image[0, -3:, -3:, -1]
        a_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        a_ : str = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # GPU integration test: runs the full prior + controlnet-depth pipeline
    # and compares the output image to a stored golden numpy array.
    #
    # NOTE(review): kept byte-identical — locals are bound to ``a_`` while
    # later statements read the original names (``hint``, ``pipe_prior``,
    # ``pipeline``, ``output`` ...), and the prompt assigned at "A robot, 4k
    # photo" is passed as ``__lowerCamelCase``.

    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
        """Free GPU memory after each test."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
        """End-to-end robot-cat depth-controlnet generation check."""
        a_ : Dict = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
        a_ : Tuple = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png' )
        a_ : List[str] = torch.from_numpy(np.array(__lowerCamelCase ) ).float() / 2_5_5.0
        a_ : List[str] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        a_ : Optional[Any] = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(__lowerCamelCase )
        a_ : Tuple = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
        a_ : Union[str, Any] = pipeline.to(__lowerCamelCase )
        pipeline.set_progress_bar_config(disable=__lowerCamelCase )
        a_ : List[str] = "A robot, 4k photo"
        a_ : Optional[int] = torch.Generator(device='cuda' ).manual_seed(0 )
        a_ : Union[str, Any] = pipe_prior(
            __lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        a_ : Tuple = torch.Generator(device='cuda' ).manual_seed(0 )
        a_ : Union[str, Any] = pipeline(
            image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , hint=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=1_0_0 , output_type='np' , )
        a_ : List[Any] = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 32 | def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : Union[str, Any] = abs(snake_case__ )
__UpperCAmelCase : Dict = 0
while n > 0:
res += n % 10
n //= 10
return res
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : Tuple = abs(snake_case__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _UpperCamelCase ( snake_case__ ) -> int:
return sum(int(snake_case__ ) for c in str(abs(snake_case__ ) ) )
def _UpperCamelCase( ) -> None:
    """Benchmark the three digit-sum implementations against each other.

    Times each implementation on progressively larger integers via
    ``timeit`` (which imports ``__main__`` to resolve the function names).
    NOTE: obfuscation had dropped the local names ``call``/``timing`` at
    their assignment sites and the inner helper's distinct parameter names;
    all are restored here.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Build a snippet like "sum_of_digits(262144)" and time it.
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup="import __main__")
        print(f'''{call:56} = {func(value)} -- {timing:.4f} seconds''')

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


# Module-level name used by the __main__ guard below.
benchmark = _UpperCamelCase
if __name__ == "__main__":
    # Run the doctests, then the timing comparison.
    import doctest
    doctest.testmod()
    # NOTE(review): expects a module-level ``benchmark`` callable defined above.
    benchmark()
| 157 | 0 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): every constant below is (re)bound to the single obfuscated
# name ``UpperCAmelCase_`` — each assignment shadows the previous one — while
# the tokenizer class reads the original names (``CLS``, ``SEP``, ``BOS``,
# ``MASK``, ``PAD``, ``RESERVED``, ``SPECIAL_CODEPOINTS``,
# ``UNICODE_VOCAB_SIZE``, ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``).
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)

# Max model input length per pretrained checkpoint.
UpperCAmelCase_ : List[str] = {
    '''nielsr/canine-s''': 20_48,
}

# Unicode defines 1,114,112 total “codepoints”
UpperCAmelCase_ : int = 1_11_41_12

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : Optional[int] = 0Xe0_00
UpperCAmelCase_ : str = 0Xe0_01
UpperCAmelCase_ : int = 0Xe0_02
UpperCAmelCase_ : Optional[Any] = 0Xe0_03
UpperCAmelCase_ : int = 0Xe0_04

# Maps special codepoints to human-readable names.
UpperCAmelCase_ : Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
UpperCAmelCase_ : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _SCREAMING_SNAKE_CASE ( _a ):
    # Character-level (CANINE) tokenizer: every Unicode codepoint is its own
    # token id; special tokens live in the Private Use Area.
    #
    # NOTE(review): kept byte-identical despite obfuscation damage — the
    # ``__init__`` header repeats the parameter name ``__lowerCamelCase`` and
    # uses ``chr(__lowerCamelCase)`` (the undefined parameter itself) as each
    # default (a SyntaxError/NameError at class creation), and all instance
    # methods share the name ``_A`` so only the final binding survives.
    # ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` refers to a module constant
    # that was renamed at its definition site.
    snake_case__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self : int , __lowerCamelCase : Dict=chr(__lowerCamelCase ) , __lowerCamelCase : Dict=chr(__lowerCamelCase ) , __lowerCamelCase : Tuple=chr(__lowerCamelCase ) , __lowerCamelCase : Tuple=chr(__lowerCamelCase ) , __lowerCamelCase : Optional[Any]=chr(__lowerCamelCase ) , __lowerCamelCase : Dict=chr(__lowerCamelCase ) , __lowerCamelCase : Tuple=False , __lowerCamelCase : List[Any]=2_048 , **__lowerCamelCase : str , ):
        """Wrap the special tokens as AddedToken and build codepoint lookup tables."""
        UpperCamelCase :List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
        UpperCamelCase :Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
        UpperCamelCase :List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
        UpperCamelCase :Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
        UpperCamelCase :Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCamelCase :Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
        super().__init__(
            bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , model_max_length=__lowerCamelCase , **__lowerCamelCase , )
        # Creates a mapping for looking up the IDs of special symbols.
        UpperCamelCase :Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            UpperCamelCase :int = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        UpperCamelCase :Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        UpperCamelCase :str = UNICODE_VOCAB_SIZE
        UpperCamelCase :Dict = len(self._special_codepoints )

    @property
    def _A ( self : int ):
        """Vocabulary size: the full Unicode codepoint space."""
        return self._unicode_vocab_size

    def _A ( self : Tuple , __lowerCamelCase : str ):
        """Tokenize a string into its individual characters."""
        return list(__lowerCamelCase )

    def _A ( self : Union[str, Any] , __lowerCamelCase : str ):
        """Convert a single-character token to its codepoint id."""
        try:
            return ord(__lowerCamelCase )
        except TypeError:
            raise ValueError(F"""invalid token: '{token}'""" )

    def _A ( self : List[str] , __lowerCamelCase : int ):
        """Convert a codepoint id back to its character (or special-token name)."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(__lowerCamelCase )
        except TypeError:
            raise ValueError(F"""invalid id: {index}""" )

    def _A ( self : Optional[Any] , __lowerCamelCase : int ):
        """Join a token sequence back into a string."""
        return "".join(__lowerCamelCase )

    def _A ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
        """Build [CLS] A [SEP] (B [SEP]) input ids with special tokens."""
        UpperCamelCase :List[Any] = [self.sep_token_id]
        UpperCamelCase :Optional[int] = [self.cls_token_id]
        UpperCamelCase :str = cls + token_ids_a + sep
        if token_ids_a is not None:
            result += token_ids_a + sep
        return result

    def _A ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
        """Return a 0/1 mask marking special-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
        UpperCamelCase :int = [1] + ([0] * len(__lowerCamelCase )) + [1]
        if token_ids_a is not None:
            result += ([0] * len(__lowerCamelCase )) + [1]
        return result

    def _A ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
        """Return segment ids: 0 for the first sequence, 1 for the second."""
        UpperCamelCase :Optional[int] = [self.sep_token_id]
        UpperCamelCase :Dict = [self.cls_token_id]
        UpperCamelCase :Optional[Any] = len(cls + token_ids_a + sep ) * [0]
        if token_ids_a is not None:
            result += len(token_ids_a + sep ) * [1]
        return result

    def _A ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
        """CANINE has no vocabulary file to save; return an empty tuple."""
        return ()
| 364 |
from string import ascii_lowercase, ascii_uppercase
def SCREAMING_SNAKE_CASE_(sentence: str) -> str:
    """Capitalize the first character of ``sentence`` (ASCII-only).

    The lowercase-to-uppercase table is built from ``string.ascii_lowercase``
    and ``string.ascii_uppercase`` (the module's only use of those imports);
    a leading character outside a-z is returned unchanged.

    NOTE: the original zipped the sentence with itself, producing an identity
    mapping that never capitalized anything, and its parameter name did not
    match the body's ``sentence``.
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
    # Run the capitalization doctests when executed directly.
    from doctest import testmod
    testmod()
| 62 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the logger and the pretrained-config map are both bound to the
# single obfuscated name ``_SCREAMING_SNAKE_CASE`` — the second assignment
# discards the logger binding.
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[str] = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class A__ ( snake_case__ ):
    """Configuration class for XLM-RoBERTa-XL models.

    NOTE: the original ``__init__`` declared every parameter under the single
    name ``__snake_case`` (a SyntaxError) and bound each value to a local
    ``snake_case`` instead of an instance attribute.  The parameter names are
    recovered from the right-hand sides of those assignments and their
    ordered defaults.
    """

    __magic_name__ = 'xlm-roberta-xl'  # model_type identifier

    def __init__(
        self,
        vocab_size=2_5_0_8_8_0,
        hidden_size=2_5_6_0,
        num_hidden_layers=3_6,
        num_attention_heads=3_2,
        intermediate_size=1_0_2_4_0,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_4,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1E-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are forwarded to the base config, as the original did.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__ ( snake_case__ ):
    """ONNX export configuration for XLM-RoBERTa-XL.

    NOTE: shares its obfuscated name with the config class above, shadowing
    it at module level.
    """

    @property
    def a_( self ):
        """Return the dynamic-axis mapping for the ONNX inputs.

        Multiple-choice inputs carry an extra ``choice`` axis; the original
        assigned the mapping to ``snake_case`` but read ``dynamic_axis``
        (a NameError), repaired here.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 127 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
# Import TensorFlow-only model classes lazily so the module stays importable
# without TF installed.
if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )
@require_tf
class A__ :
    # Fixture helper building tiny XGLM configs and inputs for the TF tests.
    #
    # NOTE(review): kept byte-identical despite obfuscation damage — the
    # ``__init__`` header repeats the parameter name ``__snake_case`` (a
    # SyntaxError), every ``self.x = x`` became a local ``snake_case = x`` so
    # no instance attribute is ever set, and all four methods share the name
    # ``a_`` so only the last binding survives.
    __magic_name__ = XGLMConfig
    __magic_name__ = {}
    __magic_name__ = 'gelu'

    def __init__( self , __snake_case , __snake_case=1_4 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=9_9 , __snake_case=3_2 , __snake_case=2 , __snake_case=4 , __snake_case=3_7 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_1_2 , __snake_case=0.02 , ):
        """Record the test geometry (batch size, seq length, model dims...)."""
        snake_case = parent
        snake_case = batch_size
        snake_case = seq_length
        snake_case = is_training
        snake_case = use_input_mask
        snake_case = use_labels
        snake_case = vocab_size
        snake_case = d_model
        snake_case = num_hidden_layers
        snake_case = num_attention_heads
        snake_case = ffn_dim
        snake_case = activation_function
        snake_case = activation_dropout
        snake_case = attention_dropout
        snake_case = max_position_embeddings
        snake_case = initializer_range
        snake_case = None
        snake_case = 0
        snake_case = 2
        snake_case = 1

    def a_ ( self ):
        """Load the reference large config from the hub."""
        return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )

    def a_ ( self ):
        """Build (config, input_ids, input_mask, head_mask) for a forward pass."""
        snake_case = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        snake_case = None
        if self.use_input_mask:
            snake_case = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case = self.get_config()
        snake_case = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def a_ ( self ):
        """Build a tiny XGLMConfig from the recorded geometry."""
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__snake_case , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__snake_case , )

    def a_ ( self ):
        """Split the prepared fixtures into (config, inputs_dict) for common tests."""
        snake_case = self.prepare_config_and_inputs()
        (
            (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) ,
        ) = config_and_inputs
        snake_case = {
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for the TF XGLM models.

    NOTE(review): both test classes in the original chunk were mangled to the
    same name ``A__`` (so the first one was shadowed and never collected by
    unittest); class name and mixin bases are reconstructed — the
    ``pipeline_model_mapping`` attribute implies the pipeline tester mixin.
    """

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    # NOTE(review): three boolean flags were mangled to one name; these flag
    # names follow the common tester convention — confirm upstream.
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration tests that run real generation with facebook/xglm-564M."""

    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        # left padding so the prompts end right before the generated tokens
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 127 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Map one GroupViT checkpoint parameter name onto its HF Transformers name.

    The original assigned every replacement to a throwaway local and then
    returned the untouched input, so no renaming ever happened; each
    replacement is now threaded through ``name``. The function name is
    restored from its call site (``rename_key(...)`` below).
    """
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    # "res" guard keeps text-encoder "resblocks" out of this rename
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite a GroupViT checkpoint state dict into the HF Transformers layout.

    Fused vision ``qkv`` and text ``in_proj`` attention weights are split into
    separate q/k/v projections; every other key is renamed via ``rename_key``.
    The original mangled body computed the q/k/v slices but never stored them
    back into the dict; the target-key writes are reconstructed here.
    Returns the (mutated) ``orig_state_dict``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary: projection MLP weights are stored with an extra dim
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats test image used to verify conversions.

    The original referenced an undefined mangled name inside ``requests.get``;
    the URL local and the ``stream=True`` kwarg are restored.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """Convert an NVlab GroupViT checkpoint to HF format, verify it, and save it.

    Parameter order restored from the ``__main__`` call site. Raises
    ``ValueError`` for an unknown ``model_name``.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result on the standard test image
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
__A = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 254 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Config attributes that are allowed to stay unused, per config class.
# A value of ``True`` allows every attribute; a list allows only those names.
SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}


# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Return True if any name in ``attributes`` is used in the modeling sources
    (or is explicitly allowed).

    Args:
        config_class: the configuration class being checked (only its
            ``__name__`` is read, for the special-case table lookup).
        attributes: variant names for one config parameter.
        default_value: the parameter's default in the config ``__init__``.
        source_strings: contents of the model's ``modeling_*`` files.
    """
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of ``config_class.__init__`` parameters that are
    never used in the corresponding ``modeling_*`` files.
    """
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check every registered config class for unused ``__init__`` attributes.

    Raises:
        ValueError: listing each offending config class and its unused
            attribute names.
    """
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 254 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    """Unit tests for PNDMScheduler: save/load round-trips, step shapes, and
    full-loop numerical regressions.

    NOTE(review): the original class inherited the undefined mangled name
    ``snake_case``; the base is restored from the only class imported above
    (``SchedulerCommonTest``). All local names are reconstructed from their
    later uses in each method body.
    """

    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and verify step outputs are unchanged."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # covered by the check_over_* helpers above
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Same round-trip check, with residuals set after set_timesteps()."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run the full PRK + PLMS denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # NOTE(review): the set_alpha_to_one booleans were mangled; True is
        # paired with the 230.0399 regression value — confirm upstream.
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 68 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Map from checkpoint name to its hosted config URL. The original bound this
# dict to a mangled name that the classes below then "inherited" as a base.
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    """Configuration for an ALBERT model.

    NOTE(review): reconstructed from a name-mangled source. The original base
    class was the URL dict above (not subclassable) and every attribute was
    assigned to a single throwaway variable; attribute names are restored from
    the constructor parameters, whose defaults match the canonical AlbertConfig.
    """

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        # Special-token ids are forwarded to the PretrainedConfig base.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ALBERT.

    Declares the model inputs and their dynamic axes for tracing/export.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the ordered input names mapped to their dynamic-axis labels.

        Fix: the source assigned the axis dict to a throwaway variable and then
        read the undefined name `dynamic_axis` (NameError at runtime).
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
# ---- concatenation artifact (table marker removed): file boundary ----
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the SqueezeBERT sub-package.
# Fix: the source assigned every structure to one repeatedly-clobbered name and
# then passed the undefined `_import_structure` to _LazyModule.
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer is only exported when the `tokenizers` backend is present.
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require the torch backend.
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backends are
    # only imported when an attribute is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---- concatenation artifact: file boundary (table marker stripped from import) ----
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the Mask2Former sub-package.
# Fixes: the structure dict and each backend list clobbered one shared name,
# `_import_structure` was never defined, and the TYPE_CHECKING imports used
# names ("MaskaFormer...") that do not match the declared exports.
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image processing requires the vision backend (PIL).
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require the torch backend.
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys

    # Replace the module with a lazy proxy at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# ---- concatenation artifact (table marker removed): file boundary ----
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Turn a flat ``["--flag", "value", ...]`` list into a ``{flag: value}`` dict.

    Fix: the body read `unknown_args` while the parameter was mangled to
    `__lowercase` (NameError), and the function shared a name with `main` below;
    the restored name matches the call site inside `main`.
    """
    # Even-index items are flags, odd-index items are their values.
    return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main():
    """Entry point of the `datasets-cli` tool: register sub-commands, parse
    arguments, and dispatch to the selected command's service object.

    Fixes: restored the `main`/`parse_unknown_args` names the module-level code
    expects, unpacked the (args, unknown_args) tuple from parse_known_args, and
    replaced the undefined intermediate names with real ones.
    """
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers')
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, 'func'):
        # No sub-command selected: show usage and exit with an error status.
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run: each registered command stores a factory in `func`.
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
# ---- concatenation artifact (table marker removed): file boundary ----
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger and checkpoint-to-config-URL map for ConvNeXt V2.
# NOTE(review): both statements bind the SAME mangled name, so the logger is
# immediately clobbered by the dict — presumably these were `logger` and
# CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP originally; verify before relying on either.
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : str = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ConvNeXt V2 model (backbone-capable).

    NOTE(review): reconstructed — the source declared two identical undefined
    base classes and duplicate parameter names (a SyntaxError); bases and
    attribute names restored from the imports and the canonical config.
    """

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Stage widths / depths default to the "tiny" variant when not given.
        self.hidden_sizes = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # Backbone API: named stages plus the aligned (features, indices) pair.
        self.stage_names = ['stem'] + [F"""stage{idx}""" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
# ---- concatenation artifact (table marker removed): file boundary ----
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and checkpoint-to-config-URL map for the Audio Spectrogram Transformer.
# NOTE(review): both statements bind the SAME mangled name, so the logger is
# immediately clobbered by the dict — likely `logger` and
# AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP originally; verify.
UpperCAmelCase__ : Any =logging.get_logger(__name__)
UpperCAmelCase__ : int ={
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class ASTConfig(PretrainedConfig):
    """Configuration for an Audio Spectrogram Transformer (AST) model.

    NOTE(review): reconstructed — the source subclassed the undefined name `a`
    and declared duplicate parameter names (a SyntaxError); parameter defaults
    match the canonical ASTConfig, from which the attribute names are restored.
    """

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        # Patch extraction strides over the (frequency, time) spectrogram axes.
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
# ---- concatenation artifact (table marker removed): file boundary ----
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad IIR filter (Audio EQ Cookbook).

    frequency: cutoff frequency in Hz; samplerate: sampling rate in Hz;
    q_factor: quality factor, defaulting to a Butterworth response.

    Fix: all seven filter factories in this module shared one mangled name and
    referenced undefined coefficient variables; names restored, formulas kept.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # Low-pass numerator is symmetric: b2 == b0.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad IIR filter (Audio EQ Cookbook).

    frequency: cutoff frequency in Hz; samplerate: sampling rate in Hz;
    q_factor: quality factor, defaulting to a Butterworth response.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # High-pass numerator is symmetric: b2 == b0.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad IIR filter (Audio EQ Cookbook).

    frequency: center frequency in Hz; samplerate: sampling rate in Hz;
    q_factor: quality factor controlling the pass-band width.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad IIR filter (Audio EQ Cookbook).

    Passes all frequencies at unity gain while shifting phase around
    `frequency`; q_factor controls the phase-transition sharpness.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # All-pass: numerator is the reversed denominator.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad IIR filter (Audio EQ Cookbook).

    gain_db: boost (positive) or cut (negative) in decibels at `frequency`.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    # Amplitude ratio derived from the dB gain (cookbook "A" parameter).
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad IIR filter (Audio EQ Cookbook).

    Boosts or cuts everything below `frequency` by `gain_db` decibels.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Cookbook shorthand terms: (A±1) ∓ (A∓1)·cos(w0).
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad IIR filter (Audio EQ Cookbook).

    Boosts or cuts everything above `frequency` by `gain_db` decibels.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Cookbook shorthand terms: (A±1) ∓ (A∓1)·cos(w0).
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
# ---- concatenation artifact (table marker removed): file boundary ----
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return ``number + 2`` if (number, number + 2) is a twin-prime pair, else -1.

    Raises TypeError for non-int input.

    Fix: the body read `number` while the parameter had a mangled name
    (NameError on the error path); restored the canonical name.
    """
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# ---- concatenation artifact (table marker removed): file boundary ----
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and checkpoint-to-config-URL map for BEiT.
# NOTE(review): both statements bind the SAME mangled name, so the logger is
# immediately clobbered by the dict — likely `logger` and
# BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP originally; verify against the source repo.
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """Configuration for a BEiT model, including its semantic-segmentation heads.

    NOTE(review): reconstructed — the source subclassed the undefined name
    `snake_case__`, declared duplicate parameter names (a SyntaxError), and
    assigned every attribute to one throwaway variable; restored from the
    constructor parameters, whose defaults match the canonical BeitConfig.
    """

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8_192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT.

    Fix: the class attribute and both property names were mangled; the OnnxConfig
    contract requires them to be `torch_onnx_minimum_version`, `inputs`, and
    `atol_for_validation`.
    """

    # Minimum torch version whose ONNX exporter supports this model.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Pixel-values input with its dynamic axes."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported model's outputs."""
        return 1e-4
# ---- concatenation artifact (table marker removed): file boundary ----
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the ELECTRA sub-package.
# Fix: the structure dict and every backend list clobbered one shared name, and
# the undefined `_import_structure` was passed to _LazyModule at the end.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )
else:
    import sys

    # Replace the module with a lazy proxy so TF/Flax/torch are only imported on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---- concatenation artifact (table marker removed): file boundary ----
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """A yaml.SafeLoader that raises on duplicate mapping keys instead of
    silently keeping the last occurrence.

    Fix: the class is referenced below as `_NoDuplicateSafeLoader` but was
    defined under a mangled name; method names restored from the internal call
    and the yaml loader API (`construct_mapping` must keep that exact name).
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        """Raise TypeError if `node` (a mapping node) contains duplicate keys."""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; convert to tuples so Counter can count them.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''')

    def construct_mapping(self, node, deep=False):
        """Construct a mapping as SafeLoader does, then reject duplicate keys."""
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its leading ``---``-fenced YAML block and the rest.

    Returns (yaml_text, remaining_text); yaml_text is None when the file does
    not start with a fenced YAML block.

    Fix: the body read `readme_content` while the parameter had a mangled name,
    and the function is called below as `_split_yaml_from_readme`.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        # Index of the closing fence, relative to the whole list.
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    """Dict subclass holding the parsed YAML metadata block of a dataset README.

    Fixes: the class is used below as `DatasetMetadata`; its base was the
    undefined `lowercase_` (it must be `dict` — `to_yaml_string` iterates
    `self.items()`); the `_FIELDS_WITH_DASHES` attribute and all method names
    were mangled but are pinned by the internal references.
    """

    # class attributes
    # YAML keys written with dashes in the file but stored with underscores here.
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Parse the YAML metadata block of the README at `path`; empty if absent."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write this metadata back into the README at `path`, preserving its body."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        """Return README text with this metadata as the leading YAML block."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a YAML string (rejecting duplicate keys) into metadata."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, mapping dashed field names back to dashes."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
# Known task-category ids mapped to their (currently empty) sub-task lists.
# NOTE(review): the mangled name `_snake_case` is immediately reassigned in the
# __main__ block below, so this table is unreachable as written — presumably it
# had a dedicated name (e.g. known task ids) originally; verify against callers.
_snake_case = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
    # CLI: validate (round-trip) the YAML metadata block of a README.md file.
    # Fix: every statement here bound the same mangled name, leaving `ap`,
    # `args`, `readme_filepath`, and `dataset_metadata` undefined at their uses.
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
    ap.add_argument("""readme_filepath""")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
# ---- concatenation artifact (table marker removed): file boundary ----
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')

# Module-level logger. Fix: it was bound to a mangled name while the rest of
# this script refers to it as `logger`.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Fixes: the class name is required by the HfArgumentParser call in main();
    every field lacked a type annotation and was assigned to one shared name,
    so @dataclass produced no fields at all. Field names/types restored from
    the help texts (canonical run_xnli arguments).
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    Fixes: the class shared its (mangled) name with the data-args class above,
    clobbering it; the name ModelArguments is required by the HfArgumentParser
    call in main(). Field names/types restored from the help texts.
    """

    model_name_or_path: str = field(
        default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    language: str = field(
        default=None, metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'}
    )
    train_language: Optional[str] = field(
        default=None, metadata={'help': 'Train language if it is different from the evaluation language.'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},
    )
def lowerCamelCase__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
SCREAMING_SNAKE_CASE : int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" , __lowerCAmelCase)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : int = training_args.get_process_log_level()
logger.setLevel(__lowerCAmelCase)
datasets.utils.logging.set_verbosity(__lowerCAmelCase)
transformers.utils.logging.set_verbosity(__lowerCAmelCase)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}")
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Dict = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Set seed before initializing model.
set_seed(training_args.seed)
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
SCREAMING_SNAKE_CASE : List[Any] = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE : List[str] = train_dataset.features["label"].names
if training_args.do_eval:
SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE : Optional[Any] = eval_dataset.features["label"].names
if training_args.do_predict:
SCREAMING_SNAKE_CASE : int = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE : Dict = predict_dataset.features["label"].names
# Labels
SCREAMING_SNAKE_CASE : Optional[int] = len(__lowerCAmelCase)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCAmelCase , idalabel={str(__lowerCAmelCase): label for i, label in enumerate(__lowerCAmelCase)} , labelaid={label: i for i, label in enumerate(__lowerCAmelCase)} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE : str = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Optional[Any] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
SCREAMING_SNAKE_CASE : List[str] = False
def preprocess_function(_a):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=__lowerCAmelCase , max_length=data_args.max_seq_length , truncation=__lowerCAmelCase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Any = min(len(__lowerCAmelCase) , data_args.max_train_samples)
SCREAMING_SNAKE_CASE : Union[str, Any] = train_dataset.select(range(__lowerCAmelCase))
with training_args.main_process_first(desc="train dataset map pre-processing"):
SCREAMING_SNAKE_CASE : Optional[Any] = train_dataset.map(
__lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(__lowerCAmelCase)) , 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
if training_args.do_eval:
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : List[str] = min(len(__lowerCAmelCase) , data_args.max_eval_samples)
SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.select(range(__lowerCAmelCase))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
SCREAMING_SNAKE_CASE : Tuple = eval_dataset.map(
__lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
SCREAMING_SNAKE_CASE : int = min(len(__lowerCAmelCase) , data_args.max_predict_samples)
SCREAMING_SNAKE_CASE : Dict = predict_dataset.select(range(__lowerCAmelCase))
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
SCREAMING_SNAKE_CASE : Tuple = predict_dataset.map(
__lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
SCREAMING_SNAKE_CASE : Dict = evaluate.load("xnli")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_a):
SCREAMING_SNAKE_CASE : Union[str, Any] = p.predictions[0] if isinstance(p.predictions , __lowerCAmelCase) else p.predictions
SCREAMING_SNAKE_CASE : Union[str, Any] = np.argmax(__lowerCAmelCase , axis=1)
return metric.compute(predictions=__lowerCAmelCase , references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Union[str, Any] = default_data_collator
elif training_args.fpaa:
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(__lowerCAmelCase , pad_to_multiple_of=8)
else:
SCREAMING_SNAKE_CASE : Tuple = None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : int = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : int = last_checkpoint
SCREAMING_SNAKE_CASE : List[str] = trainer.train(resume_from_checkpoint=__lowerCAmelCase)
SCREAMING_SNAKE_CASE : int = train_result.metrics
SCREAMING_SNAKE_CASE : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCAmelCase)
)
SCREAMING_SNAKE_CASE : Any = min(__lowerCAmelCase , len(__lowerCAmelCase))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , __lowerCAmelCase)
trainer.save_metrics("train" , __lowerCAmelCase)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
SCREAMING_SNAKE_CASE : Any = trainer.evaluate(eval_dataset=__lowerCAmelCase)
SCREAMING_SNAKE_CASE : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCAmelCase)
SCREAMING_SNAKE_CASE : List[str] = min(__lowerCAmelCase , len(__lowerCAmelCase))
trainer.log_metrics("eval" , __lowerCAmelCase)
trainer.save_metrics("eval" , __lowerCAmelCase)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***")
SCREAMING_SNAKE_CASE : int = trainer.predict(__lowerCAmelCase , metric_key_prefix="predict")
SCREAMING_SNAKE_CASE : List[Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__lowerCAmelCase)
)
SCREAMING_SNAKE_CASE : int = min(__lowerCAmelCase , len(__lowerCAmelCase))
trainer.log_metrics("predict" , __lowerCAmelCase)
trainer.save_metrics("predict" , __lowerCAmelCase)
SCREAMING_SNAKE_CASE : Optional[int] = np.argmax(__lowerCAmelCase , axis=1)
SCREAMING_SNAKE_CASE : Dict = os.path.join(training_args.output_dir , "predictions.txt")
if trainer.is_world_process_zero():
with open(__lowerCAmelCase , "w") as writer:
writer.write("index\tprediction\n")
for index, item in enumerate(__lowerCAmelCase):
SCREAMING_SNAKE_CASE : Tuple = label_list[item]
writer.write(f"{index}\t{item}\n")
# Script entry point: run the XNLI train/eval/predict driver defined above.
if __name__ == "__main__":
    main()
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase__ :
    """Directed graph stored as an adjacency dict mapping node -> list of [weight, neighbour].

    NOTE(review): this is name-mangled source.  Every method below is literally named
    ``lowerCAmelCase__``, so each later ``def`` shadows the previous one and only the
    last definition survives as a class attribute.  Inside the bodies, distinct names
    (``stack``, ``visited``, ``self.graph[u]``, ...) were collapsed into placeholders
    such as ``_UpperCAmelCase`` and ``lowerCamelCase__``, so the code raises NameError
    as written.  Docstrings describe the apparent intended algorithm only — confirm
    against the original, un-mangled file.
    """

    def __init__( self : Union[str, Any] ) ->Union[str, Any]:
        """Create an empty graph (intended: self.graph = {})."""
        _UpperCAmelCase : List[Any] = {}

    def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : int=1 ) ->Optional[Any]:
        """Add a directed edge u -> v with weight w (default 1), skipping duplicates."""
        if self.graph.get(lowerCamelCase__ ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # intended: self.graph[u] = [[w, v]]
            _UpperCAmelCase : Dict = [[w, v]]
        if not self.graph.get(lowerCamelCase__ ):
            # ensure v exists as a node even with no outgoing edges
            _UpperCAmelCase : str = []

    def lowerCAmelCase__ ( self : Any ) ->Tuple:
        """Return all node labels."""
        return list(self.graph )

    def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str ) ->Any:
        """Remove the edge u -> v if present."""
        if self.graph.get(lowerCamelCase__ ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(lowerCamelCase__ )

    def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Optional[Any]=-2 , lowerCamelCase__ : Optional[int]=-1 ) ->int:
        """Iterative depth-first search from s (-2 means 'first node'); stops early at d."""
        if s == d:
            return []
        _UpperCAmelCase : Tuple = []
        _UpperCAmelCase : Optional[int] = []
        if s == -2:
            _UpperCAmelCase : Union[str, Any] = list(self.graph )[0]
        stack.append(lowerCamelCase__ )
        visited.append(lowerCamelCase__ )
        _UpperCAmelCase : Optional[Any] = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _UpperCAmelCase : str = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(lowerCamelCase__ )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            _UpperCAmelCase : Union[str, Any] = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(lowerCamelCase__ ) != 0:
                    _UpperCAmelCase : Tuple = stack[len(lowerCamelCase__ ) - 1]
                else:
                    _UpperCAmelCase : Optional[int] = ss
            # check if we have reached the starting point
            if len(lowerCamelCase__ ) == 0:
                return visited

    def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Any=-1 ) ->str:
        """Populate the graph with random edges; c caps the node label range."""
        if c == -1:
            _UpperCAmelCase : str = floor(random() * 1_00_00 ) + 10
        for i in range(lowerCamelCase__ ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_02 ) + 1 ):
                _UpperCAmelCase : Tuple = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(lowerCamelCase__ , lowerCamelCase__ , 1 )

    def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Union[str, Any]=-2 ) ->Union[str, Any]:
        """Breadth-first traversal from s (-2 = first node); returns visit order."""
        _UpperCAmelCase : List[str] = deque()
        _UpperCAmelCase : int = []
        if s == -2:
            _UpperCAmelCase : str = list(self.graph )[0]
        d.append(lowerCamelCase__ )
        visited.append(lowerCamelCase__ )
        while d:
            _UpperCAmelCase : int = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited

    def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Union[str, Any] ) ->Dict:
        """In-degree of u: count edges x -> u by scanning the whole graph (O(V*E))."""
        _UpperCAmelCase : Dict = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Union[str, Any] ) ->int:
        """Out-degree of u."""
        return len(self.graph[u] )

    def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Any=-2 ) ->int:
        """DFS-based topological ordering attempt starting at s (intended behaviour)."""
        _UpperCAmelCase : Optional[Any] = []
        _UpperCAmelCase : List[str] = []
        if s == -2:
            _UpperCAmelCase : str = list(self.graph )[0]
        stack.append(lowerCamelCase__ )
        visited.append(lowerCamelCase__ )
        _UpperCAmelCase : List[Any] = s
        _UpperCAmelCase : Tuple = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _UpperCAmelCase : str = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        _UpperCAmelCase : List[Any] = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop() )
                if len(lowerCamelCase__ ) != 0:
                    _UpperCAmelCase : Dict = stack[len(lowerCamelCase__ ) - 1]
                else:
                    _UpperCAmelCase : List[str] = ss
            # check if we have reached the starting point
            if len(lowerCamelCase__ ) == 0:
                return sorted_nodes

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
        """Return nodes that appear to lie on a cycle (intended behaviour)."""
        _UpperCAmelCase : Tuple = []
        _UpperCAmelCase : str = []
        _UpperCAmelCase : int = list(self.graph )[0]
        stack.append(lowerCamelCase__ )
        visited.append(lowerCamelCase__ )
        _UpperCAmelCase : str = -2
        _UpperCAmelCase : Optional[int] = []
        _UpperCAmelCase : Union[str, Any] = s
        _UpperCAmelCase : Tuple = False
        _UpperCAmelCase : List[str] = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _UpperCAmelCase : Optional[Any] = s
                for node in self.graph[s]:
                    # back-edge candidate: already visited, not the direct parent,
                    # and not reached while unwinding
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        _UpperCAmelCase : Optional[int] = len(lowerCamelCase__ ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        _UpperCAmelCase : Dict = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                _UpperCAmelCase : List[str] = True
                if len(lowerCamelCase__ ) != 0:
                    _UpperCAmelCase : Tuple = stack[len(lowerCamelCase__ ) - 1]
                else:
                    _UpperCAmelCase : Optional[Any] = False
                indirect_parents.append(lowerCamelCase__ )
                _UpperCAmelCase : Any = s
                _UpperCAmelCase : Dict = ss
            # check if we have reached the starting point
            if len(lowerCamelCase__ ) == 0:
                return list(lowerCamelCase__ )

    def lowerCAmelCase__ ( self : int ) ->Optional[int]:
        """Return whether the graph appears to contain a cycle (intended behaviour)."""
        _UpperCAmelCase : Union[str, Any] = []
        _UpperCAmelCase : Optional[Any] = []
        _UpperCAmelCase : str = list(self.graph )[0]
        stack.append(lowerCamelCase__ )
        visited.append(lowerCamelCase__ )
        _UpperCAmelCase : Any = -2
        _UpperCAmelCase : Optional[Any] = []
        _UpperCAmelCase : Tuple = s
        _UpperCAmelCase : List[str] = False
        _UpperCAmelCase : Optional[Any] = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _UpperCAmelCase : Optional[int] = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        _UpperCAmelCase : Optional[int] = len(lowerCamelCase__ ) - 1
                        # NOTE(review): this inner loop returns True on the first
                        # mismatch and never decrements its index — looks like a
                        # mangled back-edge scan; confirm against the original.
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        _UpperCAmelCase : Tuple = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                _UpperCAmelCase : Dict = True
                if len(lowerCamelCase__ ) != 0:
                    _UpperCAmelCase : List[str] = stack[len(lowerCamelCase__ ) - 1]
                else:
                    _UpperCAmelCase : Union[str, Any] = False
                indirect_parents.append(lowerCamelCase__ )
                _UpperCAmelCase : Optional[Any] = s
                _UpperCAmelCase : Optional[Any] = ss
            # check if we have reached the starting point
            if len(lowerCamelCase__ ) == 0:
                return False

    def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Tuple=-2 , lowerCamelCase__ : Union[str, Any]=-1 ) ->Any:
        """Wall-clock time of a DFS from s to d."""
        _UpperCAmelCase : Dict = time()
        self.dfs(lowerCamelCase__ , lowerCamelCase__ )
        _UpperCAmelCase : Tuple = time()
        return end - begin

    def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Dict=-2 ) ->Union[str, Any]:
        """Wall-clock time of a BFS from s."""
        _UpperCAmelCase : Optional[Any] = time()
        self.bfs(lowerCamelCase__ )
        _UpperCAmelCase : Tuple = time()
        return end - begin
class lowerCAmelCase__ :
    """Undirected graph stored as an adjacency dict; each edge is mirrored u<->v.

    NOTE(review): name-mangled source, same caveats as the directed class above:
    all methods share the literal name ``lowerCAmelCase__`` (later defs shadow
    earlier ones) and distinct locals/targets were collapsed into
    ``_UpperCAmelCase`` placeholders, so the code raises NameError as written.
    Docstrings describe the apparent intended algorithm only.
    """

    def __init__( self : Tuple ) ->int:
        """Create an empty graph (intended: self.graph = {})."""
        _UpperCAmelCase : Any = {}

    def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : Tuple=1 ) ->int:
        """Add an undirected edge u <-> v with weight w (default 1), skipping duplicates."""
        if self.graph.get(lowerCamelCase__ ):
            # if there already is an edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            _UpperCAmelCase : Dict = [[w, v]]
        # add the other way
        if self.graph.get(lowerCamelCase__ ):
            # if there already is an edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if v does not exist
            _UpperCAmelCase : Tuple = [[w, u]]

    def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] ) ->Dict:
        """Remove the edge between u and v in both directions, if present."""
        if self.graph.get(lowerCamelCase__ ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(lowerCamelCase__ )
        # the other way round
        if self.graph.get(lowerCamelCase__ ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(lowerCamelCase__ )

    def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str]=-2 , lowerCamelCase__ : List[str]=-1 ) ->List[Any]:
        """Iterative depth-first search from s (-2 = first node); stops early at d."""
        if s == d:
            return []
        _UpperCAmelCase : List[Any] = []
        _UpperCAmelCase : List[Any] = []
        if s == -2:
            _UpperCAmelCase : List[Any] = list(self.graph )[0]
        stack.append(lowerCamelCase__ )
        visited.append(lowerCamelCase__ )
        _UpperCAmelCase : List[Any] = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _UpperCAmelCase : Optional[Any] = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(lowerCamelCase__ )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            _UpperCAmelCase : Optional[int] = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(lowerCamelCase__ ) != 0:
                    _UpperCAmelCase : Optional[int] = stack[len(lowerCamelCase__ ) - 1]
                else:
                    _UpperCAmelCase : Union[str, Any] = ss
            # check if we have reached the starting point
            if len(lowerCamelCase__ ) == 0:
                return visited

    def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Dict=-1 ) ->str:
        """Populate the graph with random edges; c caps the node label range."""
        if c == -1:
            _UpperCAmelCase : str = floor(random() * 1_00_00 ) + 10
        for i in range(lowerCamelCase__ ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_02 ) + 1 ):
                _UpperCAmelCase : str = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(lowerCamelCase__ , lowerCamelCase__ , 1 )

    def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Union[str, Any]=-2 ) ->Any:
        """Breadth-first traversal from s (-2 = first node); returns visit order."""
        _UpperCAmelCase : str = deque()
        _UpperCAmelCase : Optional[int] = []
        if s == -2:
            _UpperCAmelCase : Union[str, Any] = list(self.graph )[0]
        d.append(lowerCamelCase__ )
        visited.append(lowerCamelCase__ )
        while d:
            _UpperCAmelCase : Any = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited

    def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Dict ) ->Optional[Any]:
        """Degree of node u (number of incident edges)."""
        return len(self.graph[u] )

    def lowerCAmelCase__ ( self : str ) ->Any:
        """Return nodes that appear to lie on a cycle (intended behaviour)."""
        _UpperCAmelCase : int = []
        _UpperCAmelCase : str = []
        _UpperCAmelCase : Optional[int] = list(self.graph )[0]
        stack.append(lowerCamelCase__ )
        visited.append(lowerCamelCase__ )
        _UpperCAmelCase : Any = -2
        _UpperCAmelCase : Union[str, Any] = []
        _UpperCAmelCase : str = s
        _UpperCAmelCase : int = False
        _UpperCAmelCase : List[Any] = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _UpperCAmelCase : int = s
                for node in self.graph[s]:
                    # back-edge candidate: already visited, not the direct parent,
                    # and not reached while unwinding
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        _UpperCAmelCase : Union[str, Any] = len(lowerCamelCase__ ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        _UpperCAmelCase : Union[str, Any] = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                _UpperCAmelCase : List[Any] = True
                if len(lowerCamelCase__ ) != 0:
                    _UpperCAmelCase : List[str] = stack[len(lowerCamelCase__ ) - 1]
                else:
                    _UpperCAmelCase : Dict = False
                indirect_parents.append(lowerCamelCase__ )
                _UpperCAmelCase : Dict = s
                _UpperCAmelCase : Dict = ss
            # check if we have reached the starting point
            if len(lowerCamelCase__ ) == 0:
                return list(lowerCamelCase__ )

    def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
        """Return whether the graph appears to contain a cycle (intended behaviour)."""
        _UpperCAmelCase : List[Any] = []
        _UpperCAmelCase : List[str] = []
        _UpperCAmelCase : Tuple = list(self.graph )[0]
        stack.append(lowerCamelCase__ )
        visited.append(lowerCamelCase__ )
        _UpperCAmelCase : Dict = -2
        _UpperCAmelCase : Union[str, Any] = []
        _UpperCAmelCase : Dict = s
        _UpperCAmelCase : Dict = False
        _UpperCAmelCase : str = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                _UpperCAmelCase : Union[str, Any] = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        _UpperCAmelCase : int = len(lowerCamelCase__ ) - 1
                        # NOTE(review): this inner loop returns True on the first
                        # mismatch and never decrements its index — looks like a
                        # mangled back-edge scan; confirm against the original.
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        _UpperCAmelCase : Union[str, Any] = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                _UpperCAmelCase : List[Any] = True
                if len(lowerCamelCase__ ) != 0:
                    _UpperCAmelCase : List[Any] = stack[len(lowerCamelCase__ ) - 1]
                else:
                    _UpperCAmelCase : Optional[Any] = False
                indirect_parents.append(lowerCamelCase__ )
                _UpperCAmelCase : List[str] = s
                _UpperCAmelCase : List[Any] = ss
            # check if we have reached the starting point
            if len(lowerCamelCase__ ) == 0:
                return False

    def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
        """Return all node labels."""
        return list(self.graph )

    def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=-2 , lowerCamelCase__ : Dict=-1 ) ->List[Any]:
        """Wall-clock time of a DFS from s to d."""
        _UpperCAmelCase : Optional[Any] = time()
        self.dfs(lowerCamelCase__ , lowerCamelCase__ )
        _UpperCAmelCase : int = time()
        return end - begin

    def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Any=-2 ) ->int:
        """Wall-clock time of a BFS from s."""
        _UpperCAmelCase : Optional[Any] = time()
        self.bfs(lowerCamelCase__ )
        _UpperCAmelCase : Any = time()
        return end - begin
| 234 | 0 |
'''simple docstring'''
import logging
from transformers import PretrainedConfig
__UpperCamelCase = logging.getLogger(__name__)
__UpperCamelCase = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class _A ( PretrainedConfig ):
    """Configuration for the BertAbs abstractive-summarization model.

    Fixes vs. the mangled original: ``__init__`` declared every parameter as
    ``__magic_name__`` (duplicate argument names — a SyntaxError) and the body
    bound each value to ``__snake_case`` instead of an attribute; the base class
    was the undefined placeholder ``__lowercase``.  This restores one distinct
    parameter per stored attribute and the ``PretrainedConfig`` base imported at
    the top of the file.

    Args:
        vocab_size: size of the token vocabulary.
        max_pos: maximum number of positions (sequence length).
        enc_layers / enc_hidden_size / enc_heads / enc_ff_size / enc_dropout:
            encoder depth, width, attention heads, feed-forward size, dropout.
        dec_layers / dec_hidden_size / dec_heads / dec_ff_size / dec_dropout:
            the same hyper-parameters for the decoder.
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=3_05_22,
        max_pos=5_12,
        enc_layers=6,
        enc_hidden_size=5_12,
        enc_heads=8,
        enc_ff_size=5_12,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=7_68,
        dec_heads=8,
        dec_ff_size=20_48,
        dec_dropout=0.2,
        **kwargs,
    ):
        """Store the encoder/decoder hyper-parameters; extras go to the base class."""
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 13 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 13 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the checkpoint -> config-URL map.
# NOTE(review): mangled source — both values are bound to the same name ``a_``,
# so the map shadows the logger; upstream these are two distinct names.
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : List[Any] = {
    """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __UpperCamelCase ( PretrainedConfig ):
    """Configuration for a GLPN (Global-Local Path Network) depth-estimation model.

    Fixes vs. the mangled original: ``__init__`` declared every parameter as
    ``lowerCAmelCase`` (duplicate argument names — a SyntaxError), assignments
    were bound to ``lowerCamelCase_`` instead of attributes, and the base class
    was the undefined placeholder ``SCREAMING_SNAKE_CASE__``.  This restores one
    distinct parameter per stored attribute (each name matching the attribute it
    initialises) and the ``PretrainedConfig`` base imported at the top of the
    file.
    """

    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        # list defaults are treated as read-only here (assigned, never mutated)
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.0_2,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        """Store the encoder/decoder hyper-parameters; extras go to the base class."""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 75 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for ``KandinskyVaaControlnetPipeline``.

    NOTE(review): name-mangled source — the class lists its own mangled name as
    a base (upstream this is ``PipelineTesterMixin``, imported above), and all
    ``__lowerCamelCase`` class attributes below share one name, so only the last
    assignment survives at class-creation time; confirm against the original.
    """

    __lowerCamelCase : Union[str, Any] = KandinskyVaaControlnetPipeline
    __lowerCamelCase : Optional[int] = ["image_embeds", "negative_image_embeds", "hint"]
    __lowerCamelCase : Dict = ["image_embeds", "negative_image_embeds", "hint"]
    __lowerCamelCase : List[str] = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    __lowerCamelCase : Dict = False

    @property
    def _lowerCAmelCase ( self ):
        """Text-embedder hidden size used by the dummy models."""
        return 32

    @property
    def _lowerCAmelCase ( self ):
        """Time-embedding input dimension."""
        return 32

    @property
    def _lowerCAmelCase ( self ):
        """First UNet block width (mirrors the time input dim)."""
        return self.time_input_dim

    @property
    def _lowerCAmelCase ( self ):
        """Cross-attention dimension of the dummy UNet."""
        return self.time_input_dim * 4

    @property
    def _lowerCAmelCase ( self ):
        """Number of scheduler training timesteps."""
        return 100

    @property
    def _lowerCAmelCase ( self ):
        """Tiny ``UNet2DConditionModel`` wired for image+hint conditioning."""
        torch.manual_seed(0 )
        A : Any = {
            """in_channels""": 8,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image_hint""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        A : List[str] = UNetaDConditionModel(**lowerCamelCase__ )
        return model

    @property
    def _lowerCAmelCase ( self ):
        """Constructor kwargs for the tiny ``VQModel`` used as movq."""
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def _lowerCAmelCase ( self ):
        """Tiny deterministic ``VQModel`` (movq) instance."""
        torch.manual_seed(0 )
        A : List[Any] = VQModel(**self.dummy_movq_kwargs )
        return model

    def _lowerCAmelCase ( self ):
        """Assemble the unet/scheduler/movq component dict for the pipeline."""
        A : Optional[Any] = self.dummy_unet
        A : Tuple = self.dummy_movq
        A : List[Any] = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="""linear""", beta_start=0.0_0085, beta_end=0.012, clip_sample=lowerCamelCase__, set_alpha_to_one=lowerCamelCase__, steps_offset=1, prediction_type="""epsilon""", thresholding=lowerCamelCase__, )
        A : int = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=0 ):
        """Seeded dummy call kwargs: image embeds, negative embeds, hint, generator."""
        A : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
        A : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
            lowerCamelCase__ )
        # create hint
        A : int = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
        if str(lowerCamelCase__ ).startswith("""mps""" ):
            # mps does not support device-specific generators
            A : Optional[Any] = torch.manual_seed(lowerCamelCase__ )
        else:
            A : str = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
        A : List[str] = {
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """hint""": hint,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs

    def _lowerCAmelCase ( self ):
        """Run the tiny pipeline on CPU and compare a 3x3 corner slice to a pinned value."""
        A : Dict = """cpu"""
        A : List[str] = self.get_dummy_components()
        A : Dict = self.pipeline_class(**lowerCamelCase__ )
        A : Optional[Any] = pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        A : int = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
        A : Union[str, Any] = output.images
        A : str = pipe(
            **self.get_dummy_inputs(lowerCamelCase__ ), return_dict=lowerCamelCase__, )[0]
        A : Optional[int] = image[0, -3:, -3:, -1]
        A : Any = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        A : Dict = np.array(
            [0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow, GPU-only integration test for the Kandinsky 2.2 depth-controlnet
    pipeline against reference assets hosted on the Hugging Face Hub
    (requires network access and CUDA).
    """

    def _lowerCAmelCase ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowerCAmelCase ( self ):
        """End-to-end robot-cat generation compared to a stored reference image."""
        A : Tuple = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
        A : Any = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/hint_image_cat.png""" )
        # normalize the hint image to [0, 1] and add a batch dimension (NCHW)
        A : Optional[Any] = torch.from_numpy(np.array(lowerCamelCase__ ) ).float() / 255.0
        A : List[str] = hint.permute(2, 0, 1 ).unsqueeze(0 )
        A : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.floataa )
        pipe_prior.to(lowerCamelCase__ )
        A : Tuple = KandinskyVaaControlnetPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-controlnet-depth""", torch_dtype=torch.floataa )
        A : Union[str, Any] = pipeline.to(lowerCamelCase__ )
        pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
        A : Optional[Any] = """A robot, 4k photo"""
        A : Union[str, Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
        A , A : int = pipe_prior(
            lowerCamelCase__, generator=lowerCamelCase__, num_inference_steps=5, negative_prompt="""""", ).to_tuple()
        A : Union[str, Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
        A : int = pipeline(
            image_embeds=lowerCamelCase__, negative_image_embeds=lowerCamelCase__, hint=lowerCamelCase__, generator=lowerCamelCase__, num_inference_steps=100, output_type="""np""", )
        A : Optional[int] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(lowerCamelCase__, lowerCamelCase__ )
| 116 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the checkpoint -> config-URL map.
# NOTE(review): mangled source — both values are bound to the same name, so the
# map shadows the logger; upstream these are two distinct names.
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : int = """informer"""
_lowerCAmelCase : int = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "student_t" , lowerCAmelCase = "nll" , lowerCAmelCase = 1 , lowerCAmelCase = None , lowerCAmelCase = "mean" , lowerCAmelCase = 0 , lowerCAmelCase = 0 , lowerCAmelCase = 0 , lowerCAmelCase = 0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = 64 , lowerCAmelCase = 32 , lowerCAmelCase = 32 , lowerCAmelCase = 2 , lowerCAmelCase = 2 , lowerCAmelCase = 2 , lowerCAmelCase = 2 , lowerCAmelCase = True , lowerCAmelCase = "gelu" , lowerCAmelCase = 0.05 , lowerCAmelCase = 0.1 , lowerCAmelCase = 0.1 , lowerCAmelCase = 0.1 , lowerCAmelCase = 0.1 , lowerCAmelCase = 1_00 , lowerCAmelCase = 0.02 , lowerCAmelCase=True , lowerCAmelCase = "prob" , lowerCAmelCase = 5 , lowerCAmelCase = True , **lowerCAmelCase , ):
"""simple docstring"""
snake_case = prediction_length
snake_case = context_length or prediction_length
snake_case = distribution_output
snake_case = loss
snake_case = input_size
snake_case = num_time_features
snake_case = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case = scaling
snake_case = num_dynamic_real_features
snake_case = num_static_real_features
snake_case = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
snake_case = cardinality
else:
snake_case = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
snake_case = embedding_dimension
else:
snake_case = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case = num_parallel_samples
# Transformer architecture configuration
snake_case = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case = d_model
snake_case = encoder_attention_heads
snake_case = decoder_attention_heads
snake_case = encoder_ffn_dim
snake_case = decoder_ffn_dim
snake_case = encoder_layers
snake_case = decoder_layers
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = encoder_layerdrop
snake_case = decoder_layerdrop
snake_case = activation_function
snake_case = init_std
snake_case = use_cache
# Informer
snake_case = attention_type
snake_case = sampling_factor
snake_case = distil
super().__init__(is_encoder_decoder=lowerCAmelCase , **lowerCAmelCase )
@property
def snake_case ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 149 | """simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_12 , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
"""simple docstring"""
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
def snake_case ( self ):
"""simple docstring"""
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = ids_tensor([self.batch_size] , self.num_choices )
snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
"""simple docstring"""
snake_case = BioGptForCausalLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# create attention mask
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
snake_case = self.seq_length // 2
snake_case = 0
# first forward pass
snake_case ,snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case = ids_tensor((1,) , lowerCAmelCase ).item() + 1
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase )] , dim=1 , )
# get two different outputs
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , past_key_values=lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval()
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
# first forward pass
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
snake_case ,snake_case = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[
'last_hidden_state'
]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase , lowerCAmelCase=False ):
"""simple docstring"""
snake_case = BioGptForCausalLM(lowerCAmelCase )
model.to(lowerCAmelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case ( self , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(lowerCAmelCase )
snake_case = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = BioGptForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.prepare_config_and_inputs()
(
(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,
) = config_and_inputs
snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : str = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : str = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[str] = False
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptModelTester(self )
snake_case = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case = type
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCAmelCase , gradient_checkpointing=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCAmelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(lowerCAmelCase )
snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case = 'left'
# Define PAD Token = EOS Token = 50256
snake_case = tokenizer.eos_token
snake_case = model.config.eos_token_id
# use different length sentences to test batching
snake_case = [
'Hello, my dog is a little',
'Today, I',
]
snake_case = tokenizer(lowerCAmelCase , return_tensors='pt' , padding=lowerCAmelCase )
snake_case = inputs['input_ids'].to(lowerCAmelCase )
snake_case = model.generate(
input_ids=lowerCAmelCase , attention_mask=inputs['attention_mask'].to(lowerCAmelCase ) , )
snake_case = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(lowerCAmelCase )
snake_case = model.generate(input_ids=lowerCAmelCase )
snake_case = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
snake_case = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(lowerCAmelCase )
snake_case = model.generate(input_ids=lowerCAmelCase , max_length=model.config.max_length - num_paddings )
snake_case = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase )
snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase )
snake_case = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = BioGptModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = input_dict['input_ids']
snake_case = input_ids.ne(1 ).to(lowerCAmelCase )
snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case = BioGptForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = 'multi_label_classification'
snake_case = input_dict['input_ids']
snake_case = input_ids.ne(1 ).to(lowerCAmelCase )
snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case = BioGptForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
snake_case = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
snake_case = model(lowerCAmelCase )[0]
snake_case = 4_23_84
snake_case = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase )
snake_case = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase , atol=1E-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(lowerCAmelCase )
torch.manual_seed(0 )
snake_case = tokenizer('COVID-19 is' , return_tensors='pt' ).to(lowerCAmelCase )
snake_case = model.generate(
**lowerCAmelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCAmelCase , )
snake_case = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase )
snake_case = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 149 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any=13 , UpperCamelCase__: Optional[Any]=30 , UpperCamelCase__: Dict=2 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Optional[Any]=32 , UpperCamelCase__: Optional[int]=5 , UpperCamelCase__: Dict=4 , UpperCamelCase__: Optional[Any]=37 , UpperCamelCase__: List[str]="gelu" , UpperCamelCase__: str=0.1 , UpperCamelCase__: Dict=0.1 , UpperCamelCase__: Dict=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: int=2 , ):
lowerCamelCase__ : Any = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : int = image_size
lowerCamelCase__ : str = patch_size
lowerCamelCase__ : Optional[Any] = num_channels
lowerCamelCase__ : int = is_training
lowerCamelCase__ : Union[str, Any] = use_labels
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : int = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = type_sequence_label_size
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : int = scope
lowerCamelCase__ : Optional[int] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : List[str] = (image_size // patch_size) ** 2
lowerCamelCase__ : Dict = num_patches + 1
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : List[str] = None
if self.use_labels:
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Tuple = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: List[str] ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: int , UpperCamelCase__: List[Any] , UpperCamelCase__: Any ):
lowerCamelCase__ : Union[str, Any] = ViTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: str , UpperCamelCase__: Any ):
lowerCamelCase__ : Union[str, Any] = ViTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase__ : Dict = 1
lowerCamelCase__ : Optional[int] = ViTForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : List[Any] = self.type_sequence_label_size
lowerCamelCase__ : Tuple = ViTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : Dict = 1
lowerCamelCase__ : Optional[Any] = ViTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : str = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : int = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : str = config_and_inputs
lowerCamelCase__ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
a = True
a = False
a = False
a = False
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Any = ViTModelTester(self )
lowerCamelCase__ : Optional[int] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCamelCase_ ( self: int ):
pass
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
lowerCamelCase__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[str] = ViTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
lowerCamelCase__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Dict ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[str] = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = self.default_image_processor
lowerCamelCase__ : Any = prepare_img()
lowerCamelCase__ : str = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : Any = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self: Dict ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
lowerCamelCase__ : List[Any] = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(UpperCamelCase__ )
lowerCamelCase__ : List[str] = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=480 )
lowerCamelCase__ : Optional[int] = prepare_img()
lowerCamelCase__ : str = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
lowerCamelCase__ : Optional[int] = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : int = model(UpperCamelCase__ , interpolate_pos_encoding=UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : Optional[Any] = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ )
lowerCamelCase__ : int = torch.tensor(
[[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : str = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowerCamelCase__ : int = self.default_image_processor
lowerCamelCase__ : str = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
lowerCamelCase__ : Dict = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCamelCase__ : Any = model(UpperCamelCase__ )
| 41 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[Any] ='''pt'''
elif is_tf_available():
_A : Any ='''tf'''
else:
_A : List[str] ='''jax'''
class _lowercase ( _lowercase , unittest.TestCase ):
a = ByTaTokenizer
a = False
def lowerCamelCase_ ( self: str ):
super().setUp()
lowerCamelCase__ : str = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self: Optional[int] ):
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def lowerCamelCase_ ( self: Any , **UpperCamelCase__: Tuple ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_(self: Union[str, Any], tokenizer, with_prefix_space=False, max_length=20, min_length=5):
    """Build a decodable (text, ids) pair for ByT5.

    The default common tokenizer tests assume every ID is decodable on its own.
    That assumption is invalid for ByT5 because single bytes might not be valid
    UTF-8 (byte 128 for instance), so this override collects only ids whose
    lone decoding is plain ASCII text.

    BUG FIX: the original declared all four extra parameters with the same
    placeholder name (a SyntaxError) and referenced undefined locals
    (`toks`, `toks_ids`, `output_txt`).
    """
    toks = []
    for i in range(len(tokenizer)):
        try:
            tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
        except UnicodeDecodeError:
            pass
        toks.append((i, tok))
    # Keep only ids that decode to letters/spaces and re-encode to themselves.
    toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
    toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
    if max_length is not None and len(toks) > max_length:
        toks = toks[:max_length]
    if min_length is not None and len(toks) < min_length and len(toks) > 0:
        while len(toks) < min_length:
            toks = toks + toks
    # toks_str = [t[1] for t in toks]
    toks_ids = [t[0] for t in toks]
    # Ensure consistency
    output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
    if " " not in output_txt and len(toks_ids) > 1:
        # Force at least one space so downstream splitting tests behave.
        output_txt = (
            tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
            + " "
            + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
        )
    if with_prefix_space:
        output_txt = " " + output_txt
    output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
    return output_txt, output_ids
def lowerCamelCase_(self: Tuple):
    """An explicit trailing </s> must encode identically to the automatically added EOS."""
    # BUG FIX: locals were bound to one placeholder name while the assertion read
    # `batch_with_eos_added` / `batch_without_eos_added` (undefined -> NameError).
    tokenizer = self.ta_base_tokenizer
    batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
    batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
    self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
def lowerCamelCase_(self: Tuple):
    """Round-trip multibyte UTF-8 characters through encode/decode."""
    # BUG FIX: restored the local names (`tokenizer`, `encoded`, `encoded_ids`,
    # `decoded`) that the assertions referenced but the original never defined.
    tokenizer = self.ta_base_tokenizer
    src_text = "Unicode €."
    encoded = tokenizer(src_text)
    encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
    self.assertEqual(encoded["input_ids"], encoded_ids)
    # decoding
    decoded = tokenizer.decode(encoded_ids)
    self.assertEqual(decoded, "Unicode €.</s>")
    encoded = tokenizer("e è é ê ë")
    encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
    self.assertEqual(encoded["input_ids"], encoded_ids)
    # decoding
    decoded = tokenizer.decode(encoded_ids)
    self.assertEqual(decoded, "e è é ê ë</s>")
    # encode/decode, but with `encode` instead of `__call__`
    self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
def lowerCamelCase_(self: Any):
    """Batched tokenization returns a BatchEncoding padded to the longest sequence."""
    # BUG FIX: restored undefined locals (`batch`, `result`) and the concrete
    # arguments (`padding=True`, `return_tensors=FRAMEWORK`) the placeholders hid.
    tokenizer = self.ta_base_tokenizer
    src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
    # fmt: off
    expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
    # fmt: on
    batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
    self.assertIsInstance(batch, BatchEncoding)
    if FRAMEWORK != "jax":
        result = list(batch.input_ids.numpy()[0])
    else:
        result = list(batch.input_ids.tolist()[0])
    self.assertListEqual(expected_src_tokens, result)
    self.assertEqual((2, 37), batch.input_ids.shape)
    self.assertEqual((2, 37), batch.attention_mask.shape)
def lowerCamelCase_(self: List[Any]):
    """Encoder-only inputs must not produce any decoder_* keys."""
    # BUG FIX: restored the undefined `batch` local and concrete call arguments.
    tokenizer = self.ta_base_tokenizer
    src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
    batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
    # check if input_ids are returned and no decoder_input_ids
    self.assertIn("input_ids", batch)
    self.assertIn("attention_mask", batch)
    self.assertNotIn("decoder_input_ids", batch)
    self.assertNotIn("decoder_attention_mask", batch)
def lowerCamelCase_(self: Tuple):
    """`text_target` with max_length padding/truncation yields fixed-width label ids."""
    # BUG FIX: restored undefined `targets` local and the truncation flag.
    tokenizer = self.ta_base_tokenizer
    tgt_text = [
        "Summary of the text.",
        "Another summary.",
    ]
    targets = tokenizer(
        text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK)
    self.assertEqual(32, targets["input_ids"].shape[1])
def lowerCamelCase_(self: str):
    """A manually appended ' </s>' in the raw text must survive encoding unchanged."""
    # BUG FIX: restored the undefined `batch` local and expected-token names.
    tokenizer = self.ta_base_tokenizer
    src_text = ["A long paragraph for summarization. </s>"]
    tgt_text = ["Summary of the text. </s>"]
    # fmt: off
    expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
    expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
    # fmt: on
    batch = tokenizer(src_text, text_target=tgt_text)
    self.assertEqual(expected_src_tokens, batch["input_ids"][0])
    self.assertEqual(expected_tgt_tokens, batch["labels"][0])
def lowerCamelCase_(self: Optional[int]):
    """Tokenizers (incl. added/special tokens and model_max_length) must round-trip through save/load."""
    # BUG FIX: every local here was bound to a placeholder while later statements
    # referenced the original names (`tokenizers`, `tmpdirname`, `before_tokens`,
    # `after_tokenizer`, ...), all undefined -> NameError.
    # safety check on max_len default value so we are sure the test works
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            self.assertNotEqual(tokenizer.model_max_length, 42)
    # Now let's start the test
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # Isolate this from the other tests because we save additional tokens/etc
            tmpdirname = tempfile.mkdtemp()
            sample_text = " He is very happy, UNwant\u00E9d,running"
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            tokenizer.save_pretrained(tmpdirname)
            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            self.assertListEqual(before_tokens, after_tokens)
            shutil.rmtree(tmpdirname)
    tokenizers = self.get_tokenizers(model_max_length=42)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # Isolate this from the other tests because we save additional tokens/etc
            tmpdirname = tempfile.mkdtemp()
            sample_text = " He is very happy, UNwant\u00E9d,running"
            tokenizer.add_tokens(["bim", "bambam"])
            additional_special_tokens = tokenizer.additional_special_tokens
            additional_special_tokens.append("new_additional_special_token")
            tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            tokenizer.save_pretrained(tmpdirname)
            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            self.assertListEqual(before_tokens, after_tokens)
            self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
            self.assertEqual(after_tokenizer.model_max_length, 42)
            # model_max_length can still be overridden at load time.
            tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
            self.assertEqual(tokenizer.model_max_length, 43)
            shutil.rmtree(tmpdirname)
def lowerCamelCase_(self: Optional[int]):
    """`additional_special_tokens` in the saved config files must be honored and overridable."""
    # BUG FIX: restored the undefined locals (`tokenizer_list`, the loaded JSON
    # dicts, `added_tokens_extra_ids`) and the dict-key assignments the original
    # dropped when it rebound everything to one placeholder name.
    tokenizer_list = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(tmp_dir)
            with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                special_tokens_map = json.load(json_file)
            with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                tokenizer_config = json.load(json_file)
            added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
            special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                "an_additional_special_token"
            ]
            tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                "an_additional_special_token"
            ]
            with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                json.dump(special_tokens_map, outfile)
            with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                json.dump(tokenizer_config, outfile)
            # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
            # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
            # "special_tokens_map.json" files
            tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
            self.assertIn(
                "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens)
            # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
            self.assertEqual(
                ["an_additional_special_token"],
                tokenizer_without_change_in_init.convert_ids_to_tokens(
                    tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])),
            )
            # Now we test that we can change the value of additional_special_tokens in the from_pretrained
            # NOTE(review): the upstream test passes lstrip=True here — confirm; behavior of
            # the assertions below does not depend on it.
            new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
            tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)
            self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
            self.assertEqual(
                ["a_new_additional_special_token"],
                tokenizer.convert_ids_to_tokens(
                    tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])),
            )
def lowerCamelCase_(self: Union[str, Any]):
    """Decoding a lone non-UTF-8-decodable id must yield the empty string, even after save/reload."""
    # BUG FIX: restored the undefined locals `tokenizer_list` and `tokenizer`.
    tokenizer_list = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(tmp_dir)
            tokenizer = tokenizer_class.from_pretrained(tmp_dir)
            self.assertTrue(tokenizer.decode([255]) == "")
def lowerCamelCase_ ( self: Optional[int] ):
    # Intentionally a no-op override.
    # NOTE(review): presumably disables an inherited common-suite test that does not
    # apply to a byte-level tokenizer — confirm against TokenizerTesterMixin.
    pass
def lowerCamelCase_ ( self: str ):
    # Intentionally a no-op override (same rationale as the stub above).
    pass
def lowerCamelCase_ ( self: List[str] ):
    # Intentionally a no-op override (same rationale).
    pass
def lowerCamelCase_ ( self: Optional[int] ):
    # Intentionally a no-op override (same rationale).
    pass
def lowerCamelCase_(self: int):
    """convert_tokens_to_string must return a plain str for byte-level token sequences."""
    # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
    # and special added tokens as tokens
    # BUG FIX: restored the undefined locals `tokenizers`, `tokens`, `string`.
    tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
            string = tokenizer.convert_tokens_to_string(tokens)
            self.assertIsInstance(string, str)
def lowerCamelCase_(self: Any):
    """Setting `*_id` / `additional_special_tokens_ids` must keep token views in sync."""
    # BUG FIX: restored `token_id_to_test_setters` / `token_to_test_setters` and
    # the None-then-id setter sequence that the placeholder rebinding destroyed.
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            attributes_list = [
                "bos_token",
                "eos_token",
                "unk_token",
                "sep_token",
                "pad_token",
                "cls_token",
                "mask_token",
            ]
            token_id_to_test_setters = 0
            token_to_test_setters = tokenizer.convert_ids_to_tokens(
                token_id_to_test_setters, skip_special_tokens=False)
            for attr in attributes_list:
                setattr(tokenizer, attr + "_id", None)
                self.assertEqual(getattr(tokenizer, attr), None)
                self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
            setattr(tokenizer, "additional_special_tokens_ids", [])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
            setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 41 | 1 |
'''simple docstring'''
import numpy as np
def a__(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential Linear Unit (ELU) activation, element-wise.

    Returns ``vector`` where it is positive and ``alpha * (exp(vector) - 1)``
    elsewhere.

    :param vector: input array (any shape).
    :param alpha: scale applied to the negative branch.
    :return: array of the same shape as ``vector``.
    """
    # BUG FIX: the original declared both parameters with the same placeholder
    # name (a SyntaxError) while the body referenced `vector` and `alpha`.
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest
    doctest.testmod()
| 353 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__(gpta_checkpoint_path: str, gpta_config_file: str, pytorch_dump_folder_path: str) -> None:
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch weights/config dump.

    :param gpta_checkpoint_path: path to the TensorFlow checkpoint.
    :param gpta_config_file: optional config JSON; empty string selects the default GPT2Config.
    :param pytorch_dump_folder_path: output directory for the weights and config files.
    """
    # BUG FIX: the original declared all three parameters with the same placeholder
    # name (a SyntaxError) while the body read `gpta_config_file` etc.
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)
    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--gpt2_config_file""",
        default="""""",
        type=str,
        help=(
            """An optional config json file corresponding to the pre-trained OpenAI model. \n"""
            """This specifies the model architecture."""
        ),
    )
    args = parser.parse_args()
    # BUG FIX: the original called an undefined name (`convert_gpta_checkpoint_to_pytorch`;
    # the function in this file is `a__`) and read argparse attributes ("gpta_*")
    # that do not match the declared "--gpt2_*" options.
    a__(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 67 | 0 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowercase_(TokenizerTesterMixin, unittest.TestCase):
    """Word-level BertJapaneseTokenizer test suite."""

    # BUG FIX: the original inherited from the undefined name `a__` (NameError at
    # class creation) and bound all three class attributes to one name, hiding the
    # attributes the mixin expects.
    # NOTE(review): attribute names reconstructed from the upstream transformers
    # test suite — confirm the third flag.
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def __a(self):
    """Write a small word/wordpiece vocabulary to the temp dir for the tests below."""
    super().setUp()
    # BUG FIX: the vocab file path was bound to a placeholder while the `open`
    # call below reads `self.vocab_file` (never assigned in the original).
    vocab_tokens = [
        "[UNK]",
        "[CLS]",
        "[SEP]",
        "こんにちは",
        "こん",
        "にちは",
        "ばんは",
        "##こん",
        "##にちは",
        "##ばんは",
        "世界",
        "##世界",
        "、",
        "##、",
        "。",
        "##。",
    ]
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
    with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
        vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def __a(self, a):
    """Return a (raw input, expected tokenized output) text pair for the common tests."""
    # BUG FIX: both strings were bound to the same placeholder while the return
    # statement referenced the undefined names `input_text` / `output_text`.
    input_text = "こんにちは、世界。 \nこんばんは、世界。"
    output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
    return input_text, output_text
def __a ( self , a ):
UpperCamelCase__ , UpperCamelCase__ = self.get_input_output_texts(a )
UpperCamelCase__ = tokenizer.encode(a , add_special_tokens=a )
UpperCamelCase__ = tokenizer.decode(a , clean_up_tokenization_spaces=a )
return text, ids
def __a ( self ):
pass # TODO add if relevant
def __a ( self ):
pass # TODO add if relevant
def __a ( self ):
pass # TODO add if relevant
def __a ( self ):
UpperCamelCase__ = self.tokenizer_class(self.vocab_file )
UpperCamelCase__ = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(a , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __a ( self ):
UpperCamelCase__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(a )
UpperCamelCase__ = "こんにちは、世界。\nこんばんは、世界。"
UpperCamelCase__ = tokenizer.tokenize(a )
self.assertListEqual(a , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCamelCase__ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(a , "wb" ) as handle:
pickle.dump(a , a )
with open(a , "rb" ) as handle:
UpperCamelCase__ = pickle.load(a )
UpperCamelCase__ = tokenizer_new.tokenize(a )
self.assertListEqual(a , a )
def __a ( self ):
UpperCamelCase__ = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __a ( self ):
try:
UpperCamelCase__ = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __a ( self ):
try:
UpperCamelCase__ = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __a ( self ):
UpperCamelCase__ = MecabTokenizer(do_lower_case=a , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __a ( self ):
try:
UpperCamelCase__ = MecabTokenizer(
do_lower_case=a , normalize_text=a , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def __a ( self ):
UpperCamelCase__ = MecabTokenizer(normalize_text=a , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def __a ( self ):
UpperCamelCase__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(a )
UpperCamelCase__ = "こんにちは、世界。\nこんばんは、世界。"
UpperCamelCase__ = tokenizer.tokenize(a )
self.assertListEqual(a , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCamelCase__ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(a , "wb" ) as handle:
pickle.dump(a , a )
with open(a , "rb" ) as handle:
UpperCamelCase__ = pickle.load(a )
UpperCamelCase__ = tokenizer_new.tokenize(a )
self.assertListEqual(a , a )
@require_sudachi
def __a ( self ):
UpperCamelCase__ = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def __a ( self ):
UpperCamelCase__ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def __a ( self ):
UpperCamelCase__ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def __a ( self ):
UpperCamelCase__ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def __a ( self ):
UpperCamelCase__ = SudachiTokenizer(do_lower_case=a , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def __a ( self ):
UpperCamelCase__ = SudachiTokenizer(normalize_text=a , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def __a ( self ):
UpperCamelCase__ = SudachiTokenizer(trim_whitespace=a , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def __a ( self ):
UpperCamelCase__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(a )
UpperCamelCase__ = "こんにちは、世界。\nこんばんは、世界。"
UpperCamelCase__ = tokenizer.tokenize(a )
self.assertListEqual(a , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCamelCase__ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(a , "wb" ) as handle:
pickle.dump(a , a )
with open(a , "rb" ) as handle:
UpperCamelCase__ = pickle.load(a )
UpperCamelCase__ = tokenizer_new.tokenize(a )
self.assertListEqual(a , a )
@require_jumanpp
def __a ( self ):
UpperCamelCase__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __a ( self ):
UpperCamelCase__ = JumanppTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __a ( self ):
UpperCamelCase__ = JumanppTokenizer(normalize_text=a )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __a ( self ):
UpperCamelCase__ = JumanppTokenizer(trim_whitespace=a )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def __a ( self ):
UpperCamelCase__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def __a ( self ):
UpperCamelCase__ = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
UpperCamelCase__ = {}
for i, token in enumerate(a ):
UpperCamelCase__ = i
UpperCamelCase__ = WordpieceTokenizer(vocab=a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def __a ( self ):
UpperCamelCase__ = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
UpperCamelCase__ = tokenizer.subword_tokenizer
UpperCamelCase__ = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(a , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
UpperCamelCase__ = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(a , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def __a ( self ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
UpperCamelCase__ = tokenizer.encode("ありがとう。" , add_special_tokens=a )
UpperCamelCase__ = tokenizer.encode("どういたしまして。" , add_special_tokens=a )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(a )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(a , a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase_(TokenizerTesterMixin, unittest.TestCase):
    """Character-level BertJapaneseTokenizer test suite."""

    # BUG FIX: the original inherited from the undefined name `a__` and assigned
    # both class attributes to one name, hiding the mixin-required attributes.
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def __a ( self ):
super().setUp()
UpperCamelCase__ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __a ( self , **a ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **a )
def __a ( self , a ):
UpperCamelCase__ = "こんにちは、世界。 \nこんばんは、世界。"
UpperCamelCase__ = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def __a ( self ):
pass # TODO add if relevant
def __a ( self ):
pass # TODO add if relevant
def __a ( self ):
pass # TODO add if relevant
def __a ( self ):
UpperCamelCase__ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
UpperCamelCase__ = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
a , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __a ( self ):
UpperCamelCase__ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
UpperCamelCase__ = {}
for i, token in enumerate(a ):
UpperCamelCase__ = i
UpperCamelCase__ = CharacterTokenizer(vocab=a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def __a ( self ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
UpperCamelCase__ = tokenizer.encode("ありがとう。" , add_special_tokens=a )
UpperCamelCase__ = tokenizer.encode("どういたしまして。" , add_special_tokens=a )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(a )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(a , a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase_(unittest.TestCase):
    """AutoTokenizer resolution test for the Japanese BERT checkpoint."""

    def __a(self):
        # BUG FIX: the original referenced the undefined name `a` instead of the
        # locals it had just (placeholder-)assigned.
        # NOTE(review): the asserted class is reconstructed from the upstream test
        # (AutoTokenizer should resolve to BertJapaneseTokenizer) — confirm.
        model_name = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class lowercase_(unittest.TestCase):
    """Loading with a mismatched tokenizer class must emit the cross-class warning."""

    def __a(self):
        # BUG FIX: the original passed the undefined name `a` to `from_pretrained`
        # instead of the model name it had just (placeholder-)assigned.
        model_name = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(model_name)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."))
        model_name = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(model_name)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."))
| 80 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowerCamelCase(unittest.TestCase):
    """Config/input factory for the Flax RoBERTa model tests."""

    # BUG FIX: the original declared every __init__ parameter with the same
    # placeholder name (a SyntaxError) and named all three helper methods `_a`
    # (each overwriting the last), while line-level callers in this class invoke
    # `self.prepare_config_and_inputs()` — the original, un-obfuscated names are
    # restored here.
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Build random ids/masks plus an encoder-mode RobertaConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Shape the prepared inputs into the dict format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Same as above but in decoder mode, with random encoder states/mask."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class lowerCamelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    """
    Runs the common Flax model tests against every Flax Roberta head.

    NOTE(review): the original (obfuscated) code used an undefined base class
    `lowerCAmelCase__` and gave both class attributes the same name
    `SCREAMING_SNAKE_CASE` (the second binding clobbered the first); the base
    is restored to `FlaxModelTesterMixin` (imported at the top of this file)
    and the attributes/method names to the ones the mixin and unittest expect.
    """

    # Flag consumed by the common mixin.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test that every head loads `roberta-base` from PyTorch weights."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 171 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure for the Falcon model (HF `_LazyModule` pattern).
# BUG FIX: the dict was previously bound to `lowercase__` while the
# `_LazyModule(...)` call below read the undefined name `_import_structure`
# (NameError on import), and the torch-only model list overwrote the dict
# instead of being added to it.
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

# Backward-compatible alias for the previous module-level binding.
lowercase__ = _import_structure

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    # At runtime the module is replaced by a lazy proxy (standard HF pattern).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 280 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# BUG FIX: the mapping was bound to `lowercase__` while the function below
# read the undefined name `MOE_LAYER_NAME_MAPPING`.
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}

# Backward-compatible alias for the previous module-level binding.
lowercase__ = MOE_LAYER_NAME_MAPPING


def UpperCamelCase(s_dict):
    """
    Rename T5X/Flax SwitchTransformers checkpoint keys (in place) to the
    Hugging Face naming scheme and return the same dict.

    Steps: 1) `layers_{x}` -> `block/{x}/layer`, with the MLP sub-layer index
    depending on encoder (1) vs decoder (2); 2) substring renames from
    `MOE_LAYER_NAME_MAPPING`; transpose relative-attention-bias tables;
    3) split stacked expert weights (leading axis = expert index) into one
    entry per expert.
    """
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_pattern = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_pattern, new_key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encdec_pattern = r"(encoder|decoder)\/"
        if re.match(encdec_pattern, new_key):
            groups = re.match(encdec_pattern, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked weight tensor
    # into one entry per expert.  BUG FIX: the loop previously referenced the
    # undefined name `expert_weihts` and used the literal placeholder
    # "nested fstring" instead of the per-expert key.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_name = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_name] = expert_weights[idx]
                print(f"{key} -> {new_name}")
            s_dict.pop(key)
    return s_dict


# Readable alias matching the call site in the conversion function below.
rename_keys = UpperCamelCase
# Gin parameter name -> SwitchTransformersConfig attribute.
# BUG FIX: the mapping was bound to `lowercase__` while the function below
# read the undefined name `GIN_TO_CONFIG_MAPPING`.
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}

# Backward-compatible alias for the previous module-level binding.
lowercase__ = GIN_TO_CONFIG_MAPPING


def UpperCamelCase(gin_file, num_experts):
    """
    Convert a google-style gin config file into a `SwitchTransformersConfig`.

    Numeric `NAME = value` lines whose NAME appears in `GIN_TO_CONFIG_MAPPING`
    become config kwargs (int unless the value contains a '.'), the activation
    tuple line becomes `feed_forward_proj`, and `num_experts` is added on top.
    The original code imported the third-party `regex` package locally; the
    stdlib `re` module (imported at the top of this file) handles these
    patterns just as well.
    """
    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


# Readable alias matching the call site in the conversion function below.
convert_gin_to_config = UpperCamelCase
def UpperCamelCase(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    """
    Convert a T5X/Flax SwitchTransformers checkpoint into a PyTorch checkpoint.

    BUG FIX: the original (obfuscated) signature repeated one parameter name
    five times, which is a SyntaxError; names are recovered from the body and
    from the positional call in the `__main__` block.
    Either `gin_file` (parsed via `convert_gin_to_config`) or `config_file`
    (a pretrained config name/path) must be provided.
    """
    # Initialise PyTorch model
    print(f"""Loading flax weights from : {flax_checkpoint_path}""")
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    pt_model.save_pretrained(pytorch_dump_path)


# Readable alias matching the call in the `__main__` block.
convert_flax_checkpoint_to_pytorch = UpperCamelCase
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    # BUG FIX: argparse stores `--switch_t5x_checkpoint_path` as
    # `args.switch_t5x_checkpoint_path`; the old code read the non-existent
    # attribute `args.switch_tax_checkpoint_path` (AttributeError at runtime).
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 280 | 1 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase__(class_prompt, class_data_dir, num_class_images):
    """
    Download up to `num_class_images` real regularization images matching
    `class_prompt` from the LAION-400M KNN service into
    `{class_data_dir}/images`, recording captions/urls/paths in sidecar files.

    BUG FIX: every local in the original (obfuscated) body was assigned to the
    same name while the reads used the real names (NameError on first call);
    names are recovered from the reads.  Network/decode failures for a single
    image are deliberately skipped (best-effort download loop).
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until the service returns enough candidates
    # (capped at 10k requested images).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa_urls, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate that the payload decodes as an image before saving.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa_urls.write(images["url"] + "\n")
                    fa_paths.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


# Readable alias: the module-level entry point below calls `retrieve(...)`.
retrieve = lowerCAmelCase__
def lowerCAmelCase__():
    """
    Parse command-line arguments for the regularization-image retrieval script.

    BUG FIX: the original (obfuscated) body bound the parser to one name and
    read another (`parser`), and the `required`/`type` keyword values had been
    replaced by an undefined token; restored to `required=True, type=str`.
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


# Readable alias: the module-level entry point below calls `parse_args()`.
parse_args = lowerCAmelCase__
if __name__ == "__main__":
    # BUG FIX: the parsed namespace was previously bound to an obfuscated name
    # while the call below read `args`; a stray dataset artifact ("| 212 |")
    # trailing the last line has also been removed.
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class A__(Pipeline):
    """
    Image classification pipeline: assigns labels to an image (or batch of
    images) with a model from the image-classification mapping.

    BUG FIX: the original (obfuscated) class used the undefined name
    `__magic_name__` as both decorator argument and base class, and all four
    pipeline-stage methods shared the name `_lowerCamelCase` (later defs
    clobbered earlier ones, breaking the `Pipeline` contract).  The decorator
    argument, base class and method names are restored to the names imported
    at the top of this file / expected by `Pipeline`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        """Route the optional `top_k` kwarg to the postprocess stage."""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Classify the given image(s); see the base `Pipeline.__call__`."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        """Load the image and convert it to framework tensors."""
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model on the preprocessed inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Return the `top_k` (score, label) pairs, clamped to `num_labels`."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        # BUG FIX: config attribute is `id2label`, not `idalabel`.
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
'''simple docstring'''
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
    # BUG FIX: every variable in the original (obfuscated) body was assigned to
    # the same name while the reads used `url`/`image_url`/`image_data`/
    # `file_name` (NameError on first use); names recovered from the reads.
    # NOTE(review): the top-of-file import reads `from bsa import BeautifulSoup`
    # — presumably a mangling of the `bs4` package; confirm and fix the import.
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 160 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def SCREAMING_SNAKE_CASE__(x):
    """
    Return `x` unchanged if it is already iterable, otherwise duplicate it
    into a 2-tuple (used to normalise image/patch sizes).

    BUG FIX: the parameter was named `__A` while the body read `x`
    (NameError on every call).
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class __UpperCAmelCase :
    """
    Mixin of shared checks for Flax vision-text dual-encoder models
    (save/load, PT<->Flax equivalence, attention shapes).  Concrete subclasses
    supply the model/tester hooks.

    NOTE(review): this block is heavily corrupted by obfuscation — every
    method is named ``lowerCamelCase`` (later definitions clobber earlier
    ones), locals are assigned to ``_snake_case`` while reads use the original
    names (e.g. ``diff``, ``vision_model``, ``output``), and the test methods
    call hooks such as ``self.prepare_config_and_inputs`` /
    ``self.check_save_load`` that no longer exist under those names.  The code
    is left byte-identical here; comments below describe the apparent intent
    of each method, recovered from its reads.
    """
    # apparent intent: get_vision_text_model(vision_config, text_config) hook.
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
        """simple docstring"""
        pass
    # apparent intent: prepare_config_and_inputs hook.
    def lowerCamelCase ( self ):
        """simple docstring"""
        pass
    # apparent intent: get_pretrained_model_and_inputs hook.
    def lowerCamelCase ( self ):
        """simple docstring"""
        pass
    # apparent intent: assert_almost_equals(a, b, tol) — max-abs-diff check.
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case = np.abs((a - b) ).max()
        self.assertLessEqual(lowerCAmelCase_ , lowerCAmelCase_ , F'Difference between torch and flax is {diff} (>= {tol}).' )
    # apparent intent: check_model_from_pretrained_configs — build model from
    # combined config and check projection output shapes.
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ )
        _snake_case = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ )
        _snake_case = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
    # apparent intent: check_vision_text_dual_encoder_from_pretrained — build
    # from the two sub-models and check projection output shapes.
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case , _snake_case = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ )
        _snake_case = {'vision_model': vision_model, 'text_model': text_model}
        _snake_case = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ )
        _snake_case = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
    # apparent intent: check_save_load — outputs must match after a
    # save_pretrained / from_pretrained round-trip (tolerance 1e-3).
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case , _snake_case = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ )
        _snake_case = {'vision_model': vision_model, 'text_model': text_model}
        _snake_case = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ )
        _snake_case = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
        _snake_case = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowerCAmelCase_ )
            _snake_case = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ )
            _snake_case = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
            _snake_case = after_output[0]
            _snake_case = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCAmelCase_ , 1E-3 )
    # apparent intent: check_vision_text_output_attention — verify attention
    # tensor shapes for both towers (ViT seq_len = num_patches + 1 CLS token).
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case , _snake_case = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ )
        _snake_case = {'vision_model': vision_model, 'text_model': text_model}
        _snake_case = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ )
        _snake_case = model(
            input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , output_attentions=lowerCAmelCase_ )
        _snake_case = output.vision_model_output.attentions
        self.assertEqual(len(lowerCAmelCase_ ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        _snake_case = to_atuple(vision_model.config.image_size )
        _snake_case = to_atuple(vision_model.config.patch_size )
        _snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        _snake_case = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        _snake_case = output.text_model_output.attentions
        self.assertEqual(len(lowerCAmelCase_ ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    # apparent intent: check_pt_flax_equivalence(fx_model, pt_model, inputs) —
    # compare outputs directly and after cross-framework save/load.
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
        """simple docstring"""
        pt_model.to(lowerCAmelCase_ )
        pt_model.eval()
        # prepare inputs
        _snake_case = inputs_dict
        _snake_case = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            _snake_case = pt_model(**lowerCAmelCase_ ).to_tuple()
        _snake_case = fx_model(**lowerCAmelCase_ ).to_tuple()
        self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(lowerCAmelCase_ , pt_output.numpy() , 4E-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(lowerCAmelCase_ )
            _snake_case = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
        _snake_case = fx_model_loaded(**lowerCAmelCase_ ).to_tuple()
        self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(lowerCAmelCase_ , pt_output.numpy() , 4E-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(lowerCAmelCase_ )
            _snake_case = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ , from_flax=lowerCAmelCase_ )
        pt_model_loaded.to(lowerCAmelCase_ )
        pt_model_loaded.eval()
        with torch.no_grad():
            _snake_case = pt_model_loaded(**lowerCAmelCase_ ).to_tuple()
        self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(lowerCAmelCase_ , pt_output_loaded.numpy() , 4E-2 )
    # apparent intent: check_equivalence_pt_to_flax — convert PT state dict to
    # Flax params, then run the shared equivalence check.
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ )
        _snake_case = VisionTextDualEncoderModel(lowerCAmelCase_ )
        _snake_case = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ )
        _snake_case = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase_ )
        _snake_case = fx_state
        self.check_pt_flax_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # apparent intent: check_equivalence_flax_to_pt — load Flax weights into
    # the PT model, then run the shared equivalence check.
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ )
        _snake_case = VisionTextDualEncoderModel(lowerCAmelCase_ )
        _snake_case = FlaxVisionTextDualEncoderModel(lowerCAmelCase_ )
        _snake_case = load_flax_weights_in_pytorch_model(lowerCAmelCase_ , fx_model.params )
        self.check_pt_flax_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # apparent intent: test_model_from_pretrained_configs.
    def lowerCamelCase ( self ):
        """simple docstring"""
        _snake_case = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**lowerCAmelCase_ )
    # apparent intent: test_vision_text_dual_encoder_from_pretrained.
    def lowerCamelCase ( self ):
        """simple docstring"""
        _snake_case = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase_ )
    # apparent intent: test_save_load.
    def lowerCamelCase ( self ):
        """simple docstring"""
        _snake_case = self.prepare_config_and_inputs()
        self.check_save_load(**lowerCAmelCase_ )
    # apparent intent: test_vision_text_output_attention.
    def lowerCamelCase ( self ):
        """simple docstring"""
        _snake_case = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**lowerCAmelCase_ )
    # apparent intent: test_pt_flax_equivalence — split configs from inputs and
    # run both conversion directions.
    @is_pt_flax_cross_test
    def lowerCamelCase ( self ):
        """simple docstring"""
        _snake_case = self.prepare_config_and_inputs()
        _snake_case = config_inputs_dict.pop('vision_config' )
        _snake_case = config_inputs_dict.pop('text_config' )
        _snake_case = config_inputs_dict
        self.check_equivalence_pt_to_flax(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        self.check_equivalence_flax_to_pt(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # apparent intent: test_real_model_save_load_from_pretrained (slow) —
    # pretrained-model round-trip with tolerance 1e-5.
    @slow
    def lowerCamelCase ( self ):
        """simple docstring"""
        _snake_case , _snake_case = self.get_pretrained_model_and_inputs()
        _snake_case = model_a(**lowerCAmelCase_ )
        _snake_case = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(lowerCAmelCase_ )
            _snake_case = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ )
            _snake_case = model_a(**lowerCAmelCase_ )
            _snake_case = after_outputs[0]
            _snake_case = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCAmelCase_ , 1E-5 )
@require_flax
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
    """
    Concrete dual-encoder test case pairing a tiny Flax ViT vision tower with
    a tiny Flax BERT text tower.

    NOTE(review): obfuscation damage — the base class ``_lowerCamelCase`` is
    undefined (presumably the mixin defined above), all three hook methods
    share the name ``lowerCamelCase`` (later defs clobber earlier ones), and
    locals are assigned to ``_snake_case`` while reads use the real names
    (``model``, ``batch_size``, ``pixel_values`` …).  Code left byte-identical;
    comments describe apparent intent.
    """
    # apparent intent: get_pretrained_model_and_inputs — tiny ViT+BERT model
    # plus random pixel_values / input_ids / attention_mask.
    def lowerCamelCase ( self ):
        """simple docstring"""
        _snake_case = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowerCAmelCase_ , text_from_pt=lowerCAmelCase_ , )
        _snake_case = 13
        _snake_case = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        _snake_case = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        _snake_case = random_attention_mask([batch_size, 4] )
        _snake_case = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    # apparent intent: get_vision_text_model(vision_config, text_config).
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case = FlaxViTModel(lowerCAmelCase_ )
        _snake_case = FlaxBertModel(lowerCAmelCase_ )
        return vision_model, text_model
    # apparent intent: prepare_config_and_inputs — combine the ViT and BERT
    # tester outputs into one kwargs dict.
    def lowerCamelCase ( self ):
        """simple docstring"""
        _snake_case = FlaxViTModelTester(self )
        _snake_case = FlaxBertModelTester(self )
        _snake_case = vit_model_tester.prepare_config_and_inputs()
        _snake_case = bert_model_tester.prepare_config_and_inputs()
        _snake_case , _snake_case = vision_config_and_inputs
        _snake_case , _snake_case , _snake_case , _snake_case = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
    """
    Concrete dual-encoder test case pairing a tiny Flax CLIP vision tower with
    a tiny Flax BERT text tower.

    NOTE(review): same obfuscation damage as the ViT+BERT case above —
    undefined base class ``_lowerCamelCase``, three hook methods all named
    ``lowerCamelCase``, locals assigned to ``_snake_case`` but read under their
    real names.  Code left byte-identical; comments describe apparent intent.
    """
    # apparent intent: get_pretrained_model_and_inputs — tiny CLIP+BERT model
    # plus random pixel_values / input_ids / attention_mask.
    def lowerCamelCase ( self ):
        """simple docstring"""
        _snake_case = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowerCAmelCase_ , text_from_pt=lowerCAmelCase_ , )
        _snake_case = 13
        _snake_case = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        _snake_case = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        _snake_case = random_attention_mask([batch_size, 4] )
        _snake_case = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    # apparent intent: get_vision_text_model(vision_config, text_config).
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
        """simple docstring"""
        _snake_case = FlaxCLIPVisionModel(lowerCAmelCase_ )
        _snake_case = FlaxBertModel(lowerCAmelCase_ )
        return vision_model, text_model
    # apparent intent: prepare_config_and_inputs — combine the CLIP and BERT
    # tester outputs into one kwargs dict.
    def lowerCamelCase ( self ):
        """simple docstring"""
        _snake_case = FlaxCLIPVisionModelTester(self )
        _snake_case = FlaxBertModelTester(self )
        _snake_case = clip_model_tester.prepare_config_and_inputs()
        _snake_case = bert_model_tester.prepare_config_and_inputs()
        _snake_case , _snake_case = vision_config_and_inputs
        _snake_case , _snake_case , _snake_case , _snake_case = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
    """
    Slow integration test: run the pretrained CLIP-Italian dual encoder on a
    COCO fixture image and compare the image logits against reference values.

    BUG FIX: the original (obfuscated) method was named ``lowerCamelCase``
    (never collected by unittest) and every local was assigned to
    ``_snake_case`` while reads used the real names (NameError at runtime);
    names are recovered from the reads, and the undefined ``padding``
    placeholder is restored to ``True``.
    """

    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 160 | 1 |
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# BUG FIX: all three module constants were bound to `__SCREAMING_SNAKE_CASE`
# (each assignment clobbering the last) while the third line read the
# undefined name `MODEL_CONFIG_CLASSES`; names restored from the reads.
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase_ :
    """
    Arguments pertaining to which model/config/tokenizer to fine-tune, or to
    train from scratch (consumed by `HfArgumentParser`).

    BUG FIX: all five fields in the original (obfuscated) class were named
    `__UpperCamelCase` (later fields clobbered earlier ones) and their
    defaults were the undefined name `snake_case__`; field names are restored
    to the standard run_language_modeling ModelArguments and defaults to None.
    """

    # Path/identifier of a pretrained checkpoint; None means train from scratch.
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    # Architecture to instantiate when training from scratch.
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Field names are required by ``get_dataset``/``main()``, which read ``args.line_by_line``,
    ``args.whole_word_mask``, ``args.mlm``, ``args.block_size``, ``args.overwrite_cache``,
    ``args.eval_data_file``, ``args.eval_ref_file``, ``args.train_data_file(s)``,
    ``args.train_ref_file``, ``data_args.mlm_probability``, ``data_args.plm_probability``
    and ``data_args.max_span_length``.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train or eval dataset described by ``args``.

    The obfuscated original declared all four parameters under one name (a SyntaxError)
    and passed an undefined ``__lowercase`` everywhere; names are restored from the body's
    own references (``args.line_by_line``, ``tokenizer=``, ``evaluate``, ``cache_dir=``).
    The function must be named ``get_dataset`` because ``main()`` calls it by that name.
    """

    def _dataset(file_path: str, ref_path=None):
        # Build one dataset object for a single input file.
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        # Multiple training files given as a glob pattern: concatenate them.
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    """Fine-tune (or train from scratch) a language model on a text dataset.

    Restores the variable names the obfuscated original destroyed: every local was
    bound to one name while the body read ``model_args``/``data_args``/``training_args``
    etc., which were never defined. Returns the dict of evaluation results.
    """
    # See all possible arguments in src/transformers/training_args.py, or by passing
    # the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,  # the garbled `fpaa` -- TrainingArguments exposes `fp16`
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    """Entry point for xla_spawn (TPUs); the spawned process index is unused.

    The obfuscated original gave this function the same name as ``main`` itself,
    so ``main()`` at module bottom recursed into this wrapper forever.
    """
    main()
# Standard CLI entry point: parse arguments from sys.argv and run training/evaluation.
if __name__ == "__main__":
    main()
| 31 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Checks that one training epoch produces identical tensors whether the noise is
    added by a DDPM or a DDIM scheduler configured with the same betas.

    The obfuscated original named both methods ``A`` (so the helper was shadowed and
    ``self.get_model_optimizer`` was undefined), and the final asserts compared an
    undefined name ``lowercase`` -- the per-scheduler results were never kept apart.
    """

    def get_model_optimizer(self, resolution=32):
        """Return a freshly seeded UNet and an SGD optimizer over its parameters."""
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        # clip_sample is not used by add_noise, so this value does not affect the assertions
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # the tensors from the final iteration of both runs must agree
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE(BaseImageProcessor):
    """Image processor applying resize -> center-crop -> rescale -> normalize and
    returning a ``BatchFeature`` with ``pixel_values``.

    Restored from the obfuscated original, in which every method was named
    ``__lowerCamelCase`` (so they shadowed each other), parameters were duplicated
    (a SyntaxError) and bodies referenced an undefined ``lowercase_``. Method names
    (``resize``/``center_crop``/``rescale``/``normalize``) are required by the calls
    inside ``preprocess``; the base class is the imported ``BaseImageProcessor``.
    """

    # NOTE(review): attribute name restored per BaseImageProcessor convention — confirm
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample=PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (the module-level ``resize`` does the work)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image,
            size=(size["height"], size["width"]),
            resample=resample,
            data_format=data_format,
            **kwargs,
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``size``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` with the given per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline on one image or a batch; per-call arguments override
        the instance defaults set in ``__init__``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Parenthesized: the original `do_resize and size is None or resample is None`
        # raised even when do_resize was False (operator-precedence bug).
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 351 |
def find_minimum_change(denominations: list, value: str) -> list:
    """Greedy change-making: return the coins/notes (largest first) that sum to ``value``.

    ``denominations`` must be sorted ascending; ``value`` may be a numeric string.
    The obfuscated original duplicated both parameter names (a SyntaxError) and
    referenced undefined ``total_value``/``answer``; the driver code below calls this
    function as ``find_minimum_change``, which fixes its name.
    """
    total_value = int(value)

    # Coins/notes chosen so far.
    answer = []

    # Traverse denominations from largest to smallest, taking each as often as it fits.
    for denomination in reversed(denominations):
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answers" array

    return answer
# Driver code: interactively collect denominations and a target value, then print the
# greedy minimal change. The obfuscated original bound both module values to one name
# (`__a`) while the body read undefined `denominations`, `value` and `answer`.
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 173 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    """PyTorch-Lightning module fine-tuning a seq2seq model for summarization.

    Restored from the obfuscated original, in which every method was named
    ``_snake_case`` (so they shadowed each other), parameter lists duplicated the name
    ``lowercase`` (a SyntaxError), and method bodies assigned every local to one name.
    The class must be named ``SummarizationModule`` because ``main`` below instantiates
    it by that name; hook names (``training_step`` etc.) follow PyTorch-Lightning, and
    the attribute names are fixed by the bodies' own ``self.*`` reads.
    """

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            # NOTE(review): restored target of this assignment — confirm against lightning_base
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        # Negative counts mean "use all examples".
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch) -> Dict[str, List[str]]:
        """Dump one tokenized batch (decoded and raw) to JSON files for debugging."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids):
        """Decode token ids to whitespace-stripped strings."""
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch) -> Tuple:
        """Compute the (possibly label-smoothed) LM loss for one batch; returns a 1-tuple."""
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path, batch_size, shuffle=False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class lowercase ( _UpperCAmelCase ):
    """Seq2seq fine-tuning module specialized for translation.

    NOTE(review): the four identical class-attribute names below are an
    automated-renaming artifact -- only the last assignment survives at
    runtime. The originals were presumably mode / loss_names /
    metric_names / default_val_metric; restore once the base class
    (`_UpperCAmelCase`) is confirmed.
    """
    _SCREAMING_SNAKE_CASE = 'translation'
    _SCREAMING_SNAKE_CASE = ['loss']
    _SCREAMING_SNAKE_CASE = ['bleu']
    _SCREAMING_SNAKE_CASE = 'bleu'

    def __init__( self , hparams , **kwargs ) -> None:
        """Forward `hparams` to the base module and record the language pair."""
        super().__init__(hparams , **kwargs )
        # presumably consumed when the base class builds the translation
        # dataset -- TODO confirm against the base implementation
        self.src_lang = hparams.src_lang
        self.tgt_lang = hparams.tgt_lang

    def _snake_case ( self , preds , target ) -> dict:
        """Score generated translations against references with BLEU."""
        return calculate_bleu(preds , target )
def UpperCAmelCase__ ( args , model=None ):
    """Train (and optionally test) a seq2seq module described by ``args``.

    Builds a summarization or translation module, wires up logging,
    checkpointing and early-stopping callbacks, runs training, and -- when
    ``args.do_predict`` is set -- evaluates the newest checkpoint.

    Returns:
        The trained module.
    """
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args )
        else:
            model = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith("/tmp" )
        or str(args.output_dir ).startswith("/var" )
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT" , dataset )
        logger = WandbLogger(name=model.output_dir.name , project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model , args , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , lower_is_better ) , early_stopping_callback=es_callback , logger=logger , )
    pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=True ) )
    if checkpoints:
        # point both the hparams and the trainer at the newest checkpoint
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


# the `__main__` guard below invokes this entry point as `main`
main = UpperCAmelCase__
if __name__ == "__main__":
    # Build the CLI: generic pytorch-lightning Trainer flags plus the
    # model-specific flags declared by SummarizationModule, then train.
    # NOTE(review): every result below is bound to the same obfuscated name,
    # yet subsequent lines read `parser` / `args` / `main` -- automated
    # renaming clobbered the original variable names; restore before running.
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
    SCREAMING_SNAKE_CASE__ = pl.Trainer.add_argparse_args(parser)
    SCREAMING_SNAKE_CASE__ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    SCREAMING_SNAKE_CASE__ = parser.parse_args()
    main(args)
| 46 |
"""Lazy import structure for the BARTpho tokenizer (requires sentencepiece)."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Maps submodule name -> public symbols; only populated when the optional
# sentencepiece dependency is installed.
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
    # Static type checkers get the real (eager) imports.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_A = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __UpperCamelCase ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Fill in default masks for a BlenderbotSmall encoder/decoder test batch.

    Any mask not supplied is derived from the token ids (1 where the token is
    not padding) or defaults to all-ones for the head masks. Only the four
    core tensors are returned; head masks are accepted for signature
    compatibility but are not forwarded.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # was erroneously the *encoder* mask; the decoder mask computed above
        # was silently discarded
        "decoder_attention_mask": decoder_attention_mask,
    }


# the tester class in this file calls the helper by its descriptive name
prepare_blenderbot_inputs_dict = __UpperCamelCase
class A :
    """Fabricates tiny BlenderbotSmall configs/batches for the Flax model tests
    and provides shared checks that incremental (cached) decoding matches
    full decoding.

    NOTE(review): automated renaming left ``__init__`` with many duplicate
    ``UpperCamelCase__`` parameter names (a SyntaxError as written) and rebound
    most locals to ``lowerCAmelCase_`` while later lines still read the
    original names (``parent``, ``max_decoder_length``, ``model`` ...).
    Comments describe intended behavior; the original identifiers must be
    restored before this can run. ``np.intaa``/``'i4'`` are mangled integer
    dtypes -- presumably ``np.int64``; TODO confirm.
    """
    # Stores the tiny-model hyperparameters used by every test case.
    def __init__( self, UpperCamelCase__, UpperCamelCase__=13, UpperCamelCase__=7, UpperCamelCase__=True, UpperCamelCase__=False, UpperCamelCase__=99, UpperCamelCase__=16, UpperCamelCase__=2, UpperCamelCase__=4, UpperCamelCase__=4, UpperCamelCase__="gelu", UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=32, UpperCamelCase__=2, UpperCamelCase__=1, UpperCamelCase__=0, UpperCamelCase__=0.02, ):
        """Record parent test case and tiny-model hyperparameters."""
        lowerCAmelCase_ = parent
        lowerCAmelCase_ = batch_size
        lowerCAmelCase_ = seq_length
        lowerCAmelCase_ = is_training
        lowerCAmelCase_ = use_labels
        lowerCAmelCase_ = vocab_size
        lowerCAmelCase_ = hidden_size
        lowerCAmelCase_ = num_hidden_layers
        lowerCAmelCase_ = num_attention_heads
        lowerCAmelCase_ = intermediate_size
        lowerCAmelCase_ = hidden_act
        lowerCAmelCase_ = hidden_dropout_prob
        lowerCAmelCase_ = attention_probs_dropout_prob
        lowerCAmelCase_ = max_position_embeddings
        lowerCAmelCase_ = eos_token_id
        lowerCAmelCase_ = pad_token_id
        lowerCAmelCase_ = bos_token_id
        lowerCAmelCase_ = initializer_range
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Build a random token batch (EOS-terminated) plus a matching tiny config."""
        # clip to >= 3 so content tokens never collide with pad/bos/eos ids
        lowerCAmelCase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ), 3, self.vocab_size )
        lowerCAmelCase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.intaa )), -1 )
        lowerCAmelCase_ = shift_tokens_right(UpperCamelCase__, 1, 2 )
        lowerCAmelCase_ = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=UpperCamelCase__, )
        lowerCAmelCase_ = prepare_blenderbot_inputs_dict(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
        return config, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Alias used by the common-test mixin; delegates to the builder above."""
        lowerCAmelCase_ , lowerCAmelCase_ = self.prepare_config_and_inputs()
        return config, inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """Verify cached (token-by-token) decoding matches full decoding (no attn mask)."""
        lowerCAmelCase_ = 20
        lowerCAmelCase_ = model_class_name(UpperCamelCase__ )
        lowerCAmelCase_ = model.encode(inputs_dict['''input_ids'''] )
        lowerCAmelCase_ , lowerCAmelCase_ = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        lowerCAmelCase_ = model.init_cache(decoder_input_ids.shape[0], UpperCamelCase__, UpperCamelCase__ )
        lowerCAmelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='''i4''' )
        lowerCAmelCase_ = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        # decode the prefix with the cache, then the final token incrementally
        lowerCAmelCase_ = model.decode(
            decoder_input_ids[:, :-1], UpperCamelCase__, decoder_attention_mask=UpperCamelCase__, past_key_values=UpperCamelCase__, decoder_position_ids=UpperCamelCase__, )
        lowerCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
        lowerCAmelCase_ = model.decode(
            decoder_input_ids[:, -1:], UpperCamelCase__, decoder_attention_mask=UpperCamelCase__, past_key_values=outputs_cache.past_key_values, decoder_position_ids=UpperCamelCase__, )
        lowerCAmelCase_ = model.decode(UpperCamelCase__, UpperCamelCase__ )
        # compare last-position logits of cached vs non-cached decoding
        lowerCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3, msg=f"Max diff is {diff}" )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """Same cached-decoding check, but with an explicit padded attention mask."""
        lowerCAmelCase_ = 20
        lowerCAmelCase_ = model_class_name(UpperCamelCase__ )
        lowerCAmelCase_ = model.encode(inputs_dict['''input_ids'''] )
        lowerCAmelCase_ , lowerCAmelCase_ = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        # extend the decoder mask with zeros out to the cache length
        lowerCAmelCase_ = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ], axis=-1, )
        lowerCAmelCase_ = model.init_cache(decoder_input_ids.shape[0], UpperCamelCase__, UpperCamelCase__ )
        lowerCAmelCase_ = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        lowerCAmelCase_ = model.decode(
            decoder_input_ids[:, :-1], UpperCamelCase__, decoder_attention_mask=UpperCamelCase__, past_key_values=UpperCamelCase__, decoder_position_ids=UpperCamelCase__, )
        lowerCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
        lowerCAmelCase_ = model.decode(
            decoder_input_ids[:, -1:], UpperCamelCase__, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=UpperCamelCase__, decoder_position_ids=UpperCamelCase__, )
        lowerCAmelCase_ = model.decode(UpperCamelCase__, UpperCamelCase__, decoder_attention_mask=UpperCamelCase__ )
        lowerCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3, msg=f"Max diff is {diff}" )
@require_flax
class A ( unittest.TestCase ):
    """Shape/padding sanity checks for the Flax BlenderbotSmall head that do not
    require pretrained weights.

    NOTE(review): automated renaming rebound locals to ``lowerCAmelCase_``
    while later lines still read ``input_ids``, ``config``, ``lm_model``,
    ``summary``, ``shifted`` etc.; the attribute below was presumably named
    ``vocab_size`` (it is read as ``self.vocab_size``). Restore the original
    identifiers before running.
    """
    __snake_case = 99
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Build a fixed 13-row EOS-terminated batch plus a tiny config."""
        lowerCAmelCase_ = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1], # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.intaa, )
        lowerCAmelCase_ = input_ids.shape[0]
        lowerCAmelCase_ = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def SCREAMING_SNAKE_CASE__ ( self ):
        """LM head output must be (batch, seq_len, vocab_size)."""
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_config_and_data()
        lowerCAmelCase_ = FlaxBlenderbotSmallForConditionalGeneration(UpperCamelCase__ )
        lowerCAmelCase_ = lm_model(input_ids=UpperCamelCase__ )
        lowerCAmelCase_ = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape, UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """With explicit decoder inputs, logits must follow the decoder length."""
        lowerCAmelCase_ = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lowerCAmelCase_ = FlaxBlenderbotSmallForConditionalGeneration(UpperCamelCase__ )
        lowerCAmelCase_ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.intaa )
        lowerCAmelCase_ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.intaa )
        lowerCAmelCase_ = lm_model(input_ids=UpperCamelCase__, decoder_input_ids=UpperCamelCase__ )
        lowerCAmelCase_ = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape, UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """shift_tokens_right must preserve shape, reduce pad count by one, and
        place the decoder-start token (2) in column 0."""
        lowerCAmelCase_ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.intaa )
        lowerCAmelCase_ = shift_tokens_right(UpperCamelCase__, 1, 2 )
        # pad-token (id 1) counts before and after the shift
        lowerCAmelCase_ = np.equal(UpperCamelCase__, 1 ).astype(np.floataa ).sum()
        lowerCAmelCase_ = np.equal(UpperCamelCase__, 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape, input_ids.shape )
        self.assertEqual(UpperCamelCase__, n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0], 2 ).all() )
@require_flax
class A ( __UpperCAmelCase , unittest.TestCase , __UpperCAmelCase ):
    """Flax BlenderbotSmall model test suite: cache-consistency checks, jitted
    encode/decode round trips, and a slow from_pretrained smoke test.

    NOTE(review): the base-class list repeats ``__UpperCAmelCase`` (a TypeError:
    duplicate base), the three ``__snake_case`` attributes shadow one another
    (originally presumably is_encoder_decoder / all_model_classes /
    all_generative_model_classes), and the nested jitted helpers have
    duplicate ``UpperCamelCase__`` parameter names (a SyntaxError as written).
    Restore the original identifiers before running.
    """
    __snake_case = True
    __snake_case = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    __snake_case = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Create the shared model tester fixture."""
        lowerCAmelCase_ = FlaxBlenderbotSmallModelTester(self )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Cached decoding must match full decoding for every model class."""
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Same as above but with an explicit padded attention mask."""
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Jitted and un-jitted encode must agree in output count and shapes."""
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCAmelCase_ = self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ )
                lowerCAmelCase_ = model_class(UpperCamelCase__ )
                @jax.jit
                def encode_jitted(UpperCamelCase__, UpperCamelCase__=None, **UpperCamelCase__ ):
                    return model.encode(input_ids=UpperCamelCase__, attention_mask=UpperCamelCase__ )
                with self.subTest('''JIT Enabled''' ):
                    lowerCAmelCase_ = encode_jitted(**UpperCamelCase__ ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        lowerCAmelCase_ = encode_jitted(**UpperCamelCase__ ).to_tuple()
                self.assertEqual(len(UpperCamelCase__ ), len(UpperCamelCase__ ) )
                for jitted_output, output in zip(UpperCamelCase__, UpperCamelCase__ ):
                    self.assertEqual(jitted_output.shape, output.shape )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Jitted and un-jitted decode must agree in output count and shapes."""
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCAmelCase_ = model_class(UpperCamelCase__ )
                lowerCAmelCase_ = model.encode(inputs_dict['''input_ids'''], inputs_dict['''attention_mask'''] )
                lowerCAmelCase_ = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
                    return model.decode(
                        decoder_input_ids=UpperCamelCase__, decoder_attention_mask=UpperCamelCase__, encoder_outputs=UpperCamelCase__, )
                with self.subTest('''JIT Enabled''' ):
                    lowerCAmelCase_ = decode_jitted(**UpperCamelCase__ ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        lowerCAmelCase_ = decode_jitted(**UpperCamelCase__ ).to_tuple()
                self.assertEqual(len(UpperCamelCase__ ), len(UpperCamelCase__ ) )
                for jitted_output, output in zip(UpperCamelCase__, UpperCamelCase__ ):
                    self.assertEqual(jitted_output.shape, output.shape )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Pretrained checkpoint must load and run a single-token forward pass."""
        for model_class_name in self.all_model_classes:
            lowerCAmelCase_ = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            lowerCAmelCase_ = np.ones((1, 1) ) * model.config.eos_token_id
            lowerCAmelCase_ = model(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
| 167 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_A = False
class A ( unittest.TestCase ):
    """Placeholder for fast (non-nightly) checks; no test cases implemented yet."""
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
    """Nightly GPU tests for VersatileDiffusion text-to-image: save/load
    determinism and a fixed-seed image-slice regression check.

    NOTE(review): automated renaming rebound locals to ``lowerCAmelCase_``
    while later lines still read ``pipe``, ``prompt``, ``generator``,
    ``image`` etc., and device/seed arguments were mangled to
    ``UpperCamelCase__``; ``floataa`` is presumably ``float16``. Restore the
    original identifiers before running.
    """
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Saving then reloading the pipeline must reproduce the same image."""
        lowerCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCAmelCase_ = '''A painting of a squirrel eating a burger '''
        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(
            prompt=UpperCamelCase__, generator=UpperCamelCase__, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(UpperCamelCase__ )
            lowerCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCAmelCase_ = generator.manual_seed(0 )
        lowerCAmelCase_ = pipe(
            prompt=UpperCamelCase__, generator=UpperCamelCase__, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Fixed-seed 50-step generation must match a recorded image slice."""
        lowerCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''', torch_dtype=torch.floataa )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCAmelCase_ = '''A painting of a squirrel eating a burger '''
        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(
            prompt=UpperCamelCase__, generator=UpperCamelCase__, guidance_scale=7.5, num_inference_steps=50, output_type='''numpy''' ).images
        lowerCAmelCase_ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCAmelCase_ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 167 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
    """BlenderbotSmall tokenizer tests: a tiny hand-written vocab/merges fixture
    plus slow checks against the published facebook/blenderbot-90M tokenizer.

    NOTE(review): automated renaming gave every method the same name
    (``lowerCamelCase``) so only the last is collected, duplicated the
    ``UpperCamelCase`` class attributes (originally presumably
    tokenizer_class / test_rust_tokenizer), and rebound locals to ``A`` while
    later lines still read ``kwargs``, ``input_text``, ``tokenizer`` etc.
    Restore the original identifiers before running.
    """
    UpperCamelCase = BlenderbotSmallTokenizer
    UpperCamelCase = False
    def lowerCamelCase ( self :int ):
        """Write a minimal vocab.json and merges.txt fixture into tmpdirname."""
        super().setUp()
        A = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        A = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
        A = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        A = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(__UpperCamelCase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(__UpperCamelCase ) )
    def lowerCamelCase ( self :Union[str, Any] , **__UpperCamelCase :List[str] ):
        """Load a tokenizer from the fixture dir with the special-token map applied."""
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
    def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any ):
        """Provide an input/expected-output text pair for the common tests."""
        A = "adapt act apte"
        A = "adapt act apte"
        return input_text, output_text
    def lowerCamelCase ( self :str ):
        """Tokenization of the fixture text must yield the expected BPE pieces and ids."""
        A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        A = "adapt act apte"
        A = ["adapt", "act", "ap@@", "te"]
        A = tokenizer.tokenize(__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
        A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        A = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
    def lowerCamelCase ( self :Dict ):
        """The published 90M tokenizer must lowercase/normalize on round trip."""
        A = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        assert tok("sam" ).input_ids == [13_84]
        A = "I am a small frog."
        A = tok([src_text] , padding=__UpperCamelCase , truncation=__UpperCamelCase )["input_ids"]
        A = tok.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )[0]
        assert src_text != decoded # I wish it did!
        assert decoded == "i am a small frog ."
    def lowerCamelCase ( self :List[Any] ):
        """A trailing period must encode to the same id standalone and in context."""
        A = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        A = "I am a small frog ."
        A = "."
        A = tok(__UpperCamelCase )["input_ids"]
        A = tok(__UpperCamelCase )["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 292 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCAmelCase :
    """Mixin of serialization round-trip tests for feature extractors.

    Host classes must provide ``feature_extraction_class`` and
    ``feat_extract_dict`` attributes.

    NOTE(review): automated renaming gave every method the same name
    (``lowerCamelCase``), so only the last definition is reachable as an
    attribute; the original names are preserved here to keep the visible
    interface intact, but they should be restored to distinct test names.
    """
    UpperCamelCase = None

    def lowerCamelCase ( self ):
        # to_json_string round-trip: every configured key must survive.
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )

    def lowerCamelCase ( self ):
        # to_json_file / from_json_file round-trip through a temp directory.
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def lowerCamelCase ( self ):
        # save_pretrained / from_pretrained round-trip; the saved JSON must be
        # well-formed.
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def lowerCamelCase ( self ):
        # Construction with all defaults must succeed.
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
| 292 | 1 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
# Reference translations and expected BLEU scores shipped with the test suite.
filename = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
UpperCamelCase__ = filename
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    bleu_data = json.load(f)
UpperCamelCase__ = bleu_data
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow end-to-end BLEU regression tests for the four FSMT WMT19 pairs.

    NOTE(review): automated renaming gave all three methods the same name
    (``lowercase_``; originally presumably get_tokenizer / get_model /
    test_bleu_scores), left the parametrized test with duplicate ``_A``
    parameter names (a SyntaxError as written), and rebound locals while
    later lines still read ``model``, ``bleu_data``, ``scores`` etc.
    Restore the original identifiers before running.
    """
    def lowercase_ ( self : int , _A : Dict ):
        """Load the tokenizer for the given checkpoint name."""
        return FSMTTokenizer.from_pretrained(_A )
    def lowercase_ ( self : Tuple , _A : Dict ):
        """Load the model (half precision on CUDA) for the given checkpoint name."""
        UpperCAmelCase__ : int = FSMTForConditionalGeneration.from_pretrained(_A ).to(_A )
        if torch_device == "cuda":
            model.half()
        return model
    @parameterized.expand(
        [
            ['''en-ru''', 2_6.0],
            ['''ru-en''', 2_2.0],
            ['''en-de''', 2_2.0],
            ['''de-en''', 2_9.0],
        ] )
    @slow
    def lowercase_ ( self : Dict , _A : Optional[int] , _A : Dict ):
        """Beam-search translate the validation set and require BLEU >= threshold."""
        UpperCAmelCase__ : Any = f"""facebook/wmt19-{pair}"""
        UpperCAmelCase__ : List[Any] = self.get_tokenizer(_A )
        UpperCAmelCase__ : List[str] = self.get_model(_A )
        UpperCAmelCase__ : List[Any] = bleu_data[pair]['''src''']
        UpperCAmelCase__ : Optional[int] = bleu_data[pair]['''tgt''']
        UpperCAmelCase__ : Union[str, Any] = tokenizer(_A , return_tensors='''pt''' , truncation=_A , padding='''longest''' ).to(_A )
        UpperCAmelCase__ : Optional[Any] = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        UpperCAmelCase__ : Any = tokenizer.batch_decode(
            _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
        UpperCAmelCase__ : List[str] = calculate_bleu(_A , _A )
        print(_A )
        self.assertGreaterEqual(scores['''bleu'''] , _A )
| 299 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCamelCase__ = random.Random()
if is_torch_available():
import torch
def a__(shape, scale=1.0, rng=None, name=None):
    """Build a ``shape[0] x shape[1]`` nested list of random floats in [0, scale).

    Args:
        shape: 2-tuple ``(num_rows, num_cols)``.
        scale: multiplier applied to each uniform sample.
        rng: object with a ``random()`` method; the ``random`` module itself is
            used as a shared default, so pass a seeded ``random.Random`` for
            reproducible fixtures.
        name: unused; kept for signature compatibility with callers.
    """
    if rng is None:
        # the module's global state acts as the shared default RNG
        rng = random
    values = []
    for _ in range(shape[0]):
        # row-major generation order (matters for seeded reproducibility)
        values.append([rng.random() * scale for _ in range(shape[1])])
    return values


# The test classes in this file call this helper as ``floats_list``.
floats_list = a__
class lowerCamelCase_ ( unittest.TestCase ):
    """Fixture builder for the AST feature-extractor tests: holds the extractor
    hyperparameters and fabricates speech-like float inputs.

    NOTE(review): automated renaming left ``__init__`` with duplicate ``_A``
    parameter names (a SyntaxError as written) and rebound locals/attributes
    while later lines still read ``parent``, ``batch_size``, ``floats_list``
    etc. Restore the original identifiers before running.
    """
    def __init__( self : Any , _A : List[str] , _A : int=7 , _A : Dict=400 , _A : Tuple=2_000 , _A : Optional[int]=1 , _A : List[Any]=0.0 , _A : Any=16_000 , _A : int=True , _A : str=True , ):
        """Record the parent test case and the extractor hyperparameters."""
        UpperCAmelCase__ : Optional[Any] = parent
        UpperCAmelCase__ : str = batch_size
        UpperCAmelCase__ : Dict = min_seq_length
        UpperCAmelCase__ : str = max_seq_length
        # even step between min and max lengths across the batch
        UpperCAmelCase__ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        UpperCAmelCase__ : Optional[Any] = feature_size
        UpperCAmelCase__ : int = padding_value
        UpperCAmelCase__ : int = sampling_rate
        UpperCAmelCase__ : Tuple = return_attention_mask
        UpperCAmelCase__ : str = do_normalize
    def lowercase_ ( self : Optional[int] ):
        """Return the kwargs dict used to construct the feature extractor."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def lowercase_ ( self : int , _A : Optional[Any]=False , _A : Any=False ):
        """Build a batch of speech-like inputs, equal-length or increasing-length,
        optionally converted to numpy arrays."""
        def _flatten(_A : Union[str, Any] ):
            return list(itertools.chain(*_A ) )
        if equal_length:
            UpperCAmelCase__ : Tuple = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            UpperCAmelCase__ : Optional[int] = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            UpperCAmelCase__ : Dict = [np.asarray(_A ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_ ( __a , unittest.TestCase ):
    """AST feature-extractor tests: list/ndarray input equivalence, dtype of
    padded outputs, and a fixed-slice integration check on LibriSpeech audio.

    NOTE(review): automated renaming left one method with duplicate ``_A``
    parameter names (a SyntaxError as written) and rebound locals while
    later lines still read ``feature_extractor``, ``ds``, ``input_values``
    etc.; ``floataa`` is presumably ``float32``. The class attribute below
    was presumably named ``feature_extraction_class``. Restore the original
    identifiers before running.
    """
    lowerCAmelCase__ = ASTFeatureExtractor
    def lowercase_ ( self : int ):
        """Create the shared fixture builder."""
        UpperCAmelCase__ : int = ASTFeatureExtractionTester(self )
    def lowercase_ ( self : Any ):
        """Python lists and numpy arrays must produce identical features,
        batched or not."""
        UpperCAmelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        UpperCAmelCase__ : List[Any] = [np.asarray(_A ) for speech_input in speech_inputs]
        # Test not batched input
        UpperCAmelCase__ : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase__ : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
        # Test batched
        UpperCAmelCase__ : Optional[Any] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values
        UpperCAmelCase__ : Optional[int] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        UpperCAmelCase__ : Any = np.asarray(_A )
        UpperCAmelCase__ : int = feat_extract(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase__ : List[str] = feat_extract(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
    @require_torch
    def lowercase_ ( self : List[str] ):
        """Padded outputs must be float32 for both numpy and torch tensors."""
        import torch
        UpperCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase__ : Any = np.random.rand(100 ).astype(np.floataa )
        UpperCAmelCase__ : int = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCAmelCase__ : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            UpperCAmelCase__ : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def lowercase_ ( self : int , _A : List[Any] ):
        """Load the first `num_samples` clips from the dummy LibriSpeech split."""
        from datasets import load_dataset
        UpperCAmelCase__ : Tuple = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        UpperCAmelCase__ : List[Any] = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    @require_torch
    def lowercase_ ( self : Union[str, Any] ):
        """Extracted features on a real clip must match a recorded reference slice."""
        UpperCAmelCase__ : Any = torch.tensor(
            [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
             -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
             -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
             -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
        # fmt: on
        UpperCAmelCase__ : Optional[Any] = self._load_datasamples(1 )
        UpperCAmelCase__ : Optional[int] = ASTFeatureExtractor()
        UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 1_024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
| 299 | 1 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    'files' , [
        ['full:README.md', 'dataset_infos.json'],
        ['empty:README.md', 'dataset_infos.json'],
        ['dataset_infos.json'],
        ['full:README.md'],
    ] , )
def a_ ( files , tmp_path_factory ):
    """DatasetInfosDict.from_directory must read the dataset size from either a
    populated README.md YAML header or a legacy dataset_infos.json file.

    NOTE(review): several test functions in this file share the obfuscated
    name ``a_``; restore distinct names so pytest collects them all.
    """
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
            f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    'dataset_info' , [
        DatasetInfo(),
        DatasetInfo(
            description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
    ] , )
def a_ ( tmp_path , dataset_info ):
    """Round-trip a DatasetInfo through write_to_directory / from_directory.

    NOTE(review): restored the `tmp_path`/`dataset_info` parameter names (the
    original repeated one name, a SyntaxError) and consistent locals.
    """
    dataset_info_dir = str(tmp_path )
    dataset_info.write_to_directory(dataset_info_dir )
    reloaded = DatasetInfo.from_directory(dataset_info_dir )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(dataset_info_dir , 'dataset_info.json' ) )
def a_ ( ):
    """Check that a fully-populated DatasetInfo serializes to a YAML-safe dict and survives yaml dump/load.

    NOTE(review): the body read names that were never bound; restored consistent locals.
    """
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    # Exactly the whitelisted fields appear in the YAML representation.
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    # The dict must be representable in plain YAML without custom tags.
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def a_ ( ):
    """An empty DatasetInfo must serialize to an empty YAML dict.

    NOTE(review): the body read names that were never bound; restored consistent locals.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    'dataset_infos_dict' , [
        DatasetInfosDict(),
        DatasetInfosDict({'default': DatasetInfo()} ),
        DatasetInfosDict({'my_config_name': DatasetInfo()} ),
        DatasetInfosDict(
            {
                'default': DatasetInfo(
                    description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
            } ),
        DatasetInfosDict(
            {
                'v1': DatasetInfo(dataset_size=42 ),
                'v2': DatasetInfo(dataset_size=1337 ),
            } ),
    ] , )
def a_ ( tmp_path , dataset_infos_dict ):
    """Round-trip a DatasetInfosDict through write_to_directory / from_directory.

    NOTE(review): restored `tmp_path`/`dataset_infos_dict` parameter names (the
    original repeated one name, a SyntaxError) and consistent locals.
    """
    dataset_infos_dir = str(tmp_path )
    dataset_infos_dict.write_to_directory(dataset_infos_dir )
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir , 'README.md' ) )
| 77 | """simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase_ :
    """A small CNN: one convolution layer, one pooling layer and a two-layer
    back-propagation classifier, implemented with numpy matrices.

    NOTE(review): this class is not importable as written — every method is
    named ``_UpperCAmelCase`` (later defs shadow earlier ones, and calls such
    as ``self.sig`` / ``self.convolute`` / ``self.pooling`` resolve to names
    that are never defined), and several signatures repeat a parameter name
    (``a``), which is a SyntaxError. Bodies also read names the signatures
    never bind (``bp_numa``, ``conva_get``, ``size_pa``, ...). Code is kept
    byte-identical; only comments/docstrings were added.
    """

    def __init__( self , a , a , a , a , a , a=0.2 , a=0.2 ) -> Dict:
        """Initialize layer sizes and randomize weights/thresholds in [-0.5, 0.5) / [-1, 1).

        NOTE(review): duplicate parameter name ``a`` (SyntaxError); the body
        reads ``bp_numa``/``conva_get``/``size_pa``/``rate_w``/``rate_t``
        which are never bound here, and assigns to ``lowercase__`` where
        ``self.*`` attributes were clearly intended. ``-> Dict`` also refers
        to an unimported name.
        """
        lowercase__ : Any = bp_numa
        lowercase__ : Optional[int] = bp_numa
        lowercase__ : Tuple = bp_numa
        # convolution kernel size and step come from the first argument
        lowercase__ : Optional[Any] = conva_get[:2]
        lowercase__ : Optional[int] = conva_get[2]
        lowercase__ : Optional[Any] = size_pa
        lowercase__ : Union[str, Any] = rate_w
        lowercase__ : Union[str, Any] = rate_t
        # one random kernel matrix per convolution channel, values in [-0.5, 0.5)
        lowercase__ : List[Any] = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        # fully-connected weight matrices and per-layer thresholds
        lowercase__ : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        lowercase__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        lowercase__ : Any = -2 * np.random.rand(self.conva[1] ) + 1
        lowercase__ : int = -2 * np.random.rand(self.num_bpa ) + 1
        lowercase__ : int = -2 * np.random.rand(self.num_bpa ) + 1

    def _UpperCAmelCase ( self , a ) -> Union[str, Any]:
        """Serialize all model parameters to a pickle file.

        NOTE(review): the f-string below reads ``save_path`` which is never
        bound in this scope — presumably the parameter; confirm against the
        original source.
        """
        # save model dict with pickle
        lowercase__ : Optional[Any] = {
            'num_bp1': self.num_bpa,
            'num_bp2': self.num_bpa,
            'num_bp3': self.num_bpa,
            'conv1': self.conva,
            'step_conv1': self.step_conva,
            'size_pooling1': self.size_poolinga,
            'rate_weight': self.rate_weight,
            'rate_thre': self.rate_thre,
            'w_conv1': self.w_conva,
            'wkj': self.wkj,
            'vji': self.vji,
            'thre_conv1': self.thre_conva,
            'thre_bp2': self.thre_bpa,
            'thre_bp3': self.thre_bpa,
        }
        with open(a , 'wb' ) as f:
            pickle.dump(a , a )
        print(f"""Model saved: {save_path}""" )

    @classmethod
    def _UpperCAmelCase ( cls , a ) -> Any:
        """Rebuild a model instance from a pickle file produced by the save method."""
        # read saved model
        with open(a , 'rb' ) as f:
            lowercase__ : Optional[int] = pickle.load(a )  # noqa: S301
        lowercase__ : Optional[int] = model_dic.get('conv1' )
        conv_get.append(model_dic.get('step_conv1' ) )
        lowercase__ : List[Any] = model_dic.get('size_pooling1' )
        lowercase__ : Tuple = model_dic.get('num_bp1' )
        lowercase__ : int = model_dic.get('num_bp2' )
        lowercase__ : int = model_dic.get('num_bp3' )
        lowercase__ : Union[str, Any] = model_dic.get('rate_weight' )
        lowercase__ : Tuple = model_dic.get('rate_thre' )
        # create model instance
        lowercase__ : Tuple = CNN(a , a , a , a , a , a , a )
        # modify model parameter
        lowercase__ : str = model_dic.get('w_conv1' )
        lowercase__ : Optional[int] = model_dic.get('wkj' )
        lowercase__ : Tuple = model_dic.get('vji' )
        lowercase__ : str = model_dic.get('thre_conv1' )
        lowercase__ : Union[str, Any] = model_dic.get('thre_bp2' )
        lowercase__ : List[str] = model_dic.get('thre_bp3' )
        return conv_ins

    def _UpperCAmelCase ( self , a ) -> str:
        """Sigmoid activation. NOTE(review): body reads ``x``, not the bound parameter ``a``."""
        return 1 / (1 + np.exp(-1 * x ))

    def _UpperCAmelCase ( self , a ) -> Any:
        """Round a value to 3 decimal places."""
        return round(a , 3 )

    def _UpperCAmelCase ( self , a , a , a , a , a ) -> List[str]:
        """Convolve the input with every kernel and return (flattened windows, feature maps).

        NOTE(review): duplicate parameter name ``a`` (SyntaxError); body reads
        ``convs``/``w_convs``/``thre_convs``/``conv_step``/``data`` which are
        never bound here.
        """
        # convolution process
        lowercase__ : int = convs[0]
        lowercase__ : Optional[Any] = convs[1]
        lowercase__ : int = np.shape(a )[0]
        # get the data slice of original image data, data_focus
        lowercase__ : Optional[Any] = []
        for i_focus in range(0 , size_data - size_conv + 1 , a ):
            for j_focus in range(0 , size_data - size_conv + 1 , a ):
                lowercase__ : Optional[int] = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(a )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowercase__ : Union[str, Any] = []
        lowercase__ : Dict = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(a ):
            lowercase__ : Any = []
            for i_focus in range(len(a ) ):
                # elementwise product of window and kernel, minus the kernel threshold
                lowercase__ : Tuple = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(a ) )
            lowercase__ : Optional[Any] = np.asmatrix(a ).reshape(
                a , a )
            data_featuremap.append(a )
        # expanding the data slice to One dimenssion
        lowercase__ : str = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(a ) )
        lowercase__ : int = np.asarray(a )
        return focus_list, data_featuremap

    def _UpperCAmelCase ( self , a , a , a="average_pool" ) -> str:
        """Downsample each feature map by average or max pooling.

        NOTE(review): duplicate parameter name ``a`` (SyntaxError); body reads
        ``featuremaps``/``size_pooling``/``pooling_type`` never bound here.
        """
        # pooling process
        lowercase__ : List[str] = len(featuremaps[0] )
        lowercase__ : List[str] = int(size_map / size_pooling )
        lowercase__ : str = []
        for i_map in range(len(a ) ):
            lowercase__ : List[str] = featuremaps[i_map]
            lowercase__ : Optional[int] = []
            for i_focus in range(0 , a , a ):
                for j_focus in range(0 , a , a ):
                    lowercase__ : List[Any] = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(a ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(a ) )
            lowercase__ : List[Any] = np.asmatrix(a ).reshape(a , a )
            featuremap_pooled.append(a )
        return featuremap_pooled

    def _UpperCAmelCase ( self , a ) -> List[str]:
        """Flatten a list of 2-D matrices into one 1-D numpy array."""
        # expanding three dimension data to one dimension list
        lowercase__ : Any = []
        for i in range(len(a ) ):
            lowercase__ : Optional[int] = np.shape(data[i] )
            lowercase__ : int = data[i].reshape(1 , shapes[0] * shapes[1] )
            lowercase__ : str = data_listed.getA().tolist()[0]
            data_expanded.extend(a )
        lowercase__ : int = np.asarray(a )
        return data_expanded

    def _UpperCAmelCase ( self , a ) -> Dict:
        """Flatten a single matrix into a 1-row matrix."""
        # expanding matrix to one dimension list
        lowercase__ : Dict = np.asarray(a )
        lowercase__ : Union[str, Any] = np.shape(a )
        lowercase__ : Optional[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded

    def _UpperCAmelCase ( self , a , a , a , a , a ) -> List[Any]:
        """Up-sample the pooled gradient back to feature-map size and apply the sigmoid derivative.

        NOTE(review): duplicate parameter name ``a`` (SyntaxError); body reads
        ``pd_pool``/``out_map``/``size_map`` never bound here.
        """
        lowercase__ : Dict = []
        lowercase__ : int = 0
        for i_map in range(a ):
            lowercase__ : str = np.ones((size_map, size_map) )
            for i in range(0 , a , a ):
                for j in range(0 , a , a ):
                    lowercase__ : Optional[Any] = pd_pool[
                        i_pool
                    ]
                    lowercase__ : Union[str, Any] = i_pool + 1
            # chain rule: multiply by sigmoid derivative out * (1 - out)
            lowercase__ : List[Any] = np.multiply(
                a , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(a )
        return pd_all

    def _UpperCAmelCase ( self , a , a , a , a , a , a=bool ) -> str:
        """Train until the repeat count or error target is reached; returns the final MSE.

        NOTE(review): duplicate parameter name ``a`` (SyntaxError); body reads
        ``n_repeat``/``error_accuracy``/``datas_train``/``datas_teach``/
        ``patterns``/``draw_e`` never bound here; ``a=bool`` default looks like
        a corrupted ``draw_e=bool`` flag. The tuple-target annotated
        assignments (``lowercase__ , lowercase__ : ... = ...``) are also
        SyntaxErrors.
        """
        # model traning
        print('----------------------Start Training-------------------------' )
        print((' - - Shape: Train_Data ', np.shape(a )) )
        print((' - - Shape: Teach_Data ', np.shape(a )) )
        lowercase__ : int = 0
        lowercase__ : List[Any] = []
        lowercase__ : Union[str, Any] = 1_0_0_0_0
        while rp < n_repeat and mse >= error_accuracy:
            lowercase__ : List[Any] = 0
            print(f"""-------------Learning Time {rp}--------------""" )
            for p in range(len(a ) ):
                # print('------------Learning Image: %d--------------'%p)
                lowercase__ : Optional[int] = np.asmatrix(datas_train[p] )
                lowercase__ : int = np.asarray(datas_teach[p] )
                lowercase__ , lowercase__ : Union[str, Any] = self.convolute(
                    a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                lowercase__ : Optional[Any] = self.pooling(a , self.size_poolinga )
                lowercase__ : Tuple = np.shape(a )
                lowercase__ : List[str] = self._expand(a )
                # forward pass through the two BP layers
                lowercase__ : Optional[int] = data_bp_input
                lowercase__ : Optional[Any] = np.dot(a , self.vji.T ) - self.thre_bpa
                lowercase__ : str = self.sig(a )
                lowercase__ : Tuple = np.dot(a , self.wkj.T ) - self.thre_bpa
                lowercase__ : Any = self.sig(a )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                lowercase__ : int = np.multiply(
                    (data_teach - bp_outa) , np.multiply(a , (1 - bp_outa) ) )
                lowercase__ : Any = np.multiply(
                    np.dot(a , self.wkj ) , np.multiply(a , (1 - bp_outa) ) )
                lowercase__ : Optional[int] = np.dot(a , self.vji )
                lowercase__ : Union[str, Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
                lowercase__ : Any = pd_conva_pooled.T.getA().tolist()
                lowercase__ : List[str] = self._calculate_gradient_from_pool(
                    a , a , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    lowercase__ : Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
                    lowercase__ : Tuple = self.rate_weight * np.dot(a , a )
                    lowercase__ : Union[str, Any] = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    lowercase__ : Any = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                lowercase__ : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                lowercase__ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                lowercase__ : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
                lowercase__ : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                lowercase__ : Dict = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print('    ----Teach      ',data_teach)
                # print('    ----BP_output  ',bp_out3)
            lowercase__ : str = rp + 1
            lowercase__ : List[str] = error_count / patterns
            all_mse.append(a )

        def draw_error():
            # plot the error-accuracy target line against the per-epoch MSE curve
            lowercase__ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(a , '+-' )
            plt.plot(a , 'r--' )
            plt.xlabel('Learning Times' )
            plt.ylabel('All_mse' )
            plt.grid(a , alpha=0.5 )
            plt.show()

        print('------------------Training Complished---------------------' )
        print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""") )
        if draw_e:
            draw_error()
        return mse

    def _UpperCAmelCase ( self , a ) -> List[Any]:
        """Run the forward pass on test data and return rounded predictions.

        NOTE(review): body reads ``datas_test`` which is never bound by the
        signature; same forward structure as the training pass.
        """
        # model predict
        lowercase__ : Optional[int] = []
        print('-------------------Start Testing-------------------------' )
        print((' - - Shape: Test_Data ', np.shape(a )) )
        for p in range(len(a ) ):
            lowercase__ : List[str] = np.asmatrix(datas_test[p] )
            lowercase__ , lowercase__ : Tuple = self.convolute(
                a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            lowercase__ : Any = self.pooling(a , self.size_poolinga )
            lowercase__ : Union[str, Any] = self._expand(a )
            lowercase__ : Optional[Any] = data_bp_input
            lowercase__ : str = bp_outa * self.vji.T - self.thre_bpa
            lowercase__ : Optional[Any] = self.sig(a )
            lowercase__ : Dict = bp_outa * self.wkj.T - self.thre_bpa
            lowercase__ : List[str] = self.sig(a )
            produce_out.extend(bp_outa.getA().tolist() )
        lowercase__ : Optional[int] = [list(map(self.do_round , a ) ) for each in produce_out]
        return np.asarray(a )

    def _UpperCAmelCase ( self , a ) -> List[str]:
        """Expose the intermediate convolution and pooling outputs for inspection."""
        # return the data of image after convoluting process so we can check it out
        lowercase__ : Any = np.asmatrix(a )
        lowercase__ , lowercase__ : str = self.convolute(
            a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        lowercase__ : Tuple = self.pooling(a , self.size_poolinga )
        return data_conveda, data_pooleda
# Intentionally a no-op entry point: the class above is meant to be imported.
if __name__ == "__main__":
    pass
| 77 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __UpperCamelCase ( ABC ):
    """Abstract base class for CLI subcommands: a static registration hook plus a run method.

    NOTE(review): the original base class was the undefined name ``a__``; ``ABC``
    (imported above, and required for ``@abstractmethod`` to be enforced) is the
    clear intent. Also note the second ``__a`` definition shadows the first, so
    only the instance method survives on the class — the two were presumably
    distinct methods (e.g. ``register_subcommand`` / ``run``) in the original.
    """

    @staticmethod
    @abstractmethod
    def __a ( lowerCAmelCase__ ):
        """Register this command on the given argument parser (must be overridden)."""
        raise NotImplementedError()

    @abstractmethod
    def __a ( self ):
        """Execute the command (must be overridden)."""
        raise NotImplementedError()
| 364 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def _SCREAMING_SNAKE_CASE ( resistance : float , reactance : float , impedance : float ) ->dict[str, float]:
    """Solve the electrical impedance triangle (Z^2 = R^2 + X^2) for the one
    quantity given as 0, returning it as a single-entry dict.

    NOTE(review): the original signature repeated one parameter name three
    times (a SyntaxError) and every ``pow`` argument was collapsed to that
    name; restored from the impedance relation.

    :param resistance: resistance R (ohms), or 0 if unknown
    :param reactance: reactance X (ohms), or 0 if unknown
    :param impedance: impedance Z (ohms), or 0 if unknown
    :raises ValueError: unless exactly one argument is 0
    """
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("Exactly one argument must be 0" )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 79 | 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __a :
    """Model tester scaffolding for TFRoFormer: builds a small config plus dummy
    inputs and checks output shapes for every task head.

    NOTE(review): this class is not importable as written — every method is
    named ``SCREAMING_SNAKE_CASE__`` (later defs shadow earlier ones, and
    cross-references such as ``self.prepare_config_and_inputs`` resolve to
    names never defined), most signatures repeat the parameter name
    ``lowerCAmelCase__`` (a SyntaxError), and bodies assign to ``lowercase__``
    while reading the originally-intended names (``config``, ``input_ids``,
    ``model`` ...), which are unbound. Code kept byte-identical; only
    comments/docstrings were added.
    """

    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Tuple:
        """Store fixed small-model hyperparameters (batch=13, seq=7, vocab=99, hidden=32, ...).

        NOTE(review): duplicate parameter names (SyntaxError); assignments go
        to ``lowercase__`` where ``self.*`` attributes were clearly intended.
        """
        lowercase__: Optional[int] = parent
        lowercase__: Any = 13
        lowercase__: Tuple = 7
        lowercase__: Optional[int] = True
        lowercase__: Optional[int] = True
        lowercase__: List[Any] = True
        lowercase__: Union[str, Any] = True
        lowercase__: List[Any] = 99
        lowercase__: Tuple = 32
        lowercase__: Tuple = 2
        lowercase__: str = 4
        lowercase__: List[str] = 37
        lowercase__: List[Any] = """gelu"""
        lowercase__: int = 0.1
        lowercase__: Dict = 0.1
        lowercase__: str = 512
        lowercase__: List[str] = 16
        lowercase__: Optional[int] = 2
        lowercase__: str = 0.0_2
        lowercase__: int = 3
        lowercase__: Optional[Any] = 4
        lowercase__: Dict = None

    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
        """Build random input tensors, optional masks/labels, and a small RoFormerConfig."""
        lowercase__: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase__: List[str] = None
        if self.use_input_mask:
            lowercase__: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase__: Optional[Any] = None
        if self.use_token_type_ids:
            lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase__: Tuple = None
        lowercase__: Tuple = None
        lowercase__: Dict = None
        if self.use_labels:
            lowercase__: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices )
        lowercase__: Tuple = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
        """Base model: last_hidden_state must be (batch, seq, hidden); dict and list call styles."""
        lowercase__: str = TFRoFormerModel(config=lowerCamelCase_ )
        lowercase__: Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        lowercase__: Optional[Any] = [input_ids, input_mask]
        lowercase__: Optional[int] = model(lowerCamelCase_ )
        lowercase__: Union[str, Any] = model(lowerCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
        """Causal-LM head: logits must be (batch, seq, vocab)."""
        lowercase__: Optional[Any] = True
        lowercase__: str = TFRoFormerForCausalLM(config=lowerCamelCase_ )
        lowercase__: List[str] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowercase__: int = model(lowerCamelCase_ )["""logits"""]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )

    def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
        """Masked-LM head: logits must be (batch, seq, vocab)."""
        lowercase__: Any = TFRoFormerForMaskedLM(config=lowerCamelCase_ )
        lowercase__: Any = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowercase__: Dict = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
        """Sequence-classification head: logits must be (batch, num_labels)."""
        lowercase__: int = self.num_labels
        lowercase__: Union[str, Any] = TFRoFormerForSequenceClassification(config=lowerCamelCase_ )
        lowercase__: Union[str, Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowercase__: Any = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
        """Multiple-choice head: inputs tiled to (batch, num_choices, seq); logits (batch, num_choices)."""
        lowercase__: Optional[Any] = self.num_choices
        lowercase__: List[Any] = TFRoFormerForMultipleChoice(config=lowerCamelCase_ )
        lowercase__: Union[str, Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
        lowercase__: Union[str, Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
        lowercase__: List[str] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
        lowercase__: List[str] = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        lowercase__: Optional[Any] = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
        """Token-classification head: logits must be (batch, seq, num_labels)."""
        lowercase__: Any = self.num_labels
        lowercase__: int = TFRoFormerForTokenClassification(config=lowerCamelCase_ )
        lowercase__: Any = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowercase__: str = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
        """Question-answering head: start/end logits must each be (batch, seq)."""
        lowercase__: Tuple = TFRoFormerForQuestionAnswering(config=lowerCamelCase_ )
        lowercase__: Dict = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowercase__: List[str] = model(lowerCamelCase_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        """Package config and inputs into the dict shape the common test mixin expects.

        NOTE(review): the tuple unpacking below was collapsed to a single
        annotated target, so ``config_and_inputs``, ``input_ids`` etc. are
        unbound — originally a 7-way unpack of ``prepare_config_and_inputs``.
        """
        lowercase__: Optional[int] = self.prepare_config_and_inputs()
        (
            lowercase__
        ): int = config_and_inputs
        lowercase__: List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class __a ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Common-suite test class for all TFRoFormer heads.

    NOTE(review): the two mixin bases are the undefined name ``lowercase_``
    (presumably ``TFModelTesterMixin`` / ``PipelineTesterMixin`` imported
    above), every test method is named ``SCREAMING_SNAKE_CASE__`` (later defs
    shadow earlier ones), one signature repeats a parameter name (a
    SyntaxError), and calls like ``self.model_tester.create_and_check_model``
    target methods that no longer exist under those names. Code kept
    byte-identical; only comments/docstrings were added.
    """

    # Model classes / pipeline mapping exercised by the common test suite.
    __lowercase : Tuple = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    __lowercase : Any = (
        {
            'feature-extraction': TFRoFormerModel,
            'fill-mask': TFRoFormerForMaskedLM,
            'question-answering': TFRoFormerForQuestionAnswering,
            'text-classification': TFRoFormerForSequenceClassification,
            'text-generation': TFRoFormerForCausalLM,
            'token-classification': TFRoFormerForTokenClassification,
            'zero-shot': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __lowercase : Dict = False
    __lowercase : List[Any] = False

    def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
        """Skip the text-generation pipeline tests for this model.

        NOTE(review): duplicate parameter names (SyntaxError) — originally the
        pipeline-skip hook with distinct arguments.
        """
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        """Create the model tester and a ConfigTester for RoFormerConfig."""
        lowercase__: Union[str, Any] = TFRoFormerModelTester(self )
        lowercase__: Tuple = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )

    def SCREAMING_SNAKE_CASE__ ( self ) -> str:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        """Shape-check the base model."""
        lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )

    def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
        """Shape-check the masked-LM head."""
        lowercase__: Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )

    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        """Shape-check the causal-LM head."""
        lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*lowerCamelCase_ )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
        """Shape-check the multiple-choice head."""
        lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        """Shape-check the question-answering head."""
        lowercase__: Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        """Shape-check the sequence-classification head."""
        lowercase__: str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )

    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        """Shape-check the token-classification head."""
        lowercase__: str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        """Smoke-test loading the pretrained checkpoint from the hub."""
        lowercase__: int = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
        self.assertIsNotNone(lowerCamelCase_ )
@require_tf
class __a ( unittest.TestCase ):
    """Integration test: pretrained TFRoFormer masked-LM logits have the expected shape and values.

    NOTE(review): local names reconstructed from the reads (the original
    assigned to `lowercase__` and read `output`/`vocab_size`, which were
    unbound); tensor constants are unchanged.
    """

    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 50_000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
                    [-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
                    [-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class __a ( unittest.TestCase ):
    """Integration tests for TFRoFormerSinusoidalPositionalEmbedding weights.

    NOTE(review): the class attribute read as ``self.tolerance`` was named
    ``__lowercase`` and the locals were corrupted (`lowercase__` assignments,
    unbound reads); names reconstructed from the reads, constants unchanged.
    Both methods still share the name ``SCREAMING_SNAKE_CASE__``, so the
    second definition shadows the first — presumably distinct tests originally.
    """

    tolerance = 1E-4

    def SCREAMING_SNAKE_CASE__ ( self ):
        """First rows of the sinusoidal table for a tiny (6, 6) embedding."""
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emba_weights = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
        tf.debugging.assert_near(emba_weights , desired_weights , atol=self.tolerance )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """First (3, 5) corner of a full-size (512, 512) sinusoidal table."""
        desired_weights = tf.constant(
            [
                [0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
                [0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
                [0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class __a ( unittest.TestCase ):
    """Integration test for rotary position embeddings applied to query/key layers.

    NOTE(review): reconstructed — the original assigned to single
    ``lowercase__`` targets (the rotary call returns a pair read later as
    ``query_layer``/``key_layer``), read ``self.tolerance`` from an attribute
    named ``__lowercase``, and used the undefined ``tf.floataa`` (clearly
    ``tf.float32``). Tensor constants are unchanged.
    """

    tolerance = 1E-4

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Deterministic ramp inputs: query is an increasing ramp, key its negation.
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        desired_query_layer = tf.constant(
            [
                [0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
                [-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
                [-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
                [-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
                [0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
                [3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
            ] )
        desired_key_layer = tf.constant(
            [
                [0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
                [0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
                [1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
                [2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
                [-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
                [-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , desired_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , desired_key_layer , atol=self.tolerance )
'''simple docstring'''
def __A ( number , position ):
    """Set the bit at `position` of `number` to 1 and return the result.

    NOTE(review): parameter names restored — the original repeated one name
    (a SyntaxError) while the body already read `number`/`position`.
    """
    return number | (1 << position)
def __A ( number , position ):
    """Clear the bit at `position` of `number` (set it to 0) and return the result.

    NOTE(review): parameter names restored — the original repeated one name
    (a SyntaxError) while the body already read `number`/`position`.
    """
    return number & ~(1 << position)
def __A ( number , position ):
    """Flip (toggle) the bit at `position` of `number` and return the result.

    NOTE(review): parameter names restored — the original repeated one name
    (a SyntaxError) while the body already read `number`/`position`.
    """
    return number ^ (1 << position)
def __A ( number , position ):
    """Return True if the bit at `position` of `number` is set.

    NOTE(review): parameter names restored — the original repeated one name
    (a SyntaxError) while the body already read `number`/`position`.
    """
    return ((number >> position) & 1) == 1
def __A ( number , position ):
    """Return the bit at `position` of `number` as 0 or 1.

    NOTE(review): parameter names restored — the original repeated one name
    (a SyntaxError) while the body already read `number`/`position`. Note all
    five functions in this section share the name `__A`, so earlier
    definitions are shadowed at import time.
    """
    return int((number & (1 << position)) != 0 )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( SchedulerCommonTest ):
    """Unit tests for ``DDPMScheduler``.

    Fixes over the mangled original: the base class referenced the undefined
    name ``SCREAMING_SNAKE_CASE`` (``SchedulerCommonTest`` is imported above);
    every local was assigned to ``A`` but read via a descriptive name
    (NameError); and every method was named ``UpperCamelCase__``, so all but
    the last were unreachable.  Method/attribute names are restored from how
    the base-class helpers (``self.scheduler_classes``,
    ``self.check_over_configs`` ...) use them.
    """

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default DDPM config, with **kwargs overriding entries."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        # Reference variance values for timesteps 0, 487 and 999.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        """Run the full reverse-diffusion loop and pin the deterministic result."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same as the loop above but with ``prediction_type='v_prediction'``."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        """`previous_timestep` must follow the user-supplied timestep list."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",):
            scheduler.set_timesteps(timesteps=timesteps)
| 367 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def _A ( _a : Callable[[int | float], int | float] , _a : int | float , _a : int | float , _a : int = 1_0_0 , ):
"""simple docstring"""
A = x_start
A = fnc(_a )
A = 0.0
for _ in range(_a ):
# Approximates curve as a sequence of linear lines and sums their length
A = (x_end - x_start) / steps + xa
A = fnc(_a )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
A = xa
A = fxa
return length
if __name__ == "__main__":

    def f(x):
        """Example curve: f(x) = sin(10 * x)."""
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        # Fix: the mangled guard called the undefined names `line_length` and
        # `f`; the arc-length function in this file is defined as `_A` above.
        print(f"""With {i} steps: {_A(f, -10, 10, i)}""")
        i *= 10
| 77 | 0 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# Pairs are (stable-diffusion key, HF Diffusers key).  The mangled source
# assigned every one of these to `__A`, leaving the names the loops and the
# converter functions below read (e.g. `unet_conversion_map_layer`) undefined.
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def lowercase ( unet_state_dict ):
    """Rename an HF Diffusers UNet state dict to stable-diffusion keys.

    Fix: the mangled body called ``v.replace(_A, _A)`` (``_A`` undefined) and
    dropped the writes back into ``mapping``; restored from the companion
    loops above (maps are (sd, hf) pairs, so HF substrings are replaced by
    their SD equivalents).
    """
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
# Pairs are (stable-diffusion key, HF Diffusers key).  Names restored: the
# mangled source assigned everything to `__A`, so `vae_conversion_map` and
# `vae_conversion_map_attn` (read by the VAE converter below) were undefined.
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def lowercase ( w ):
    """Reshape an HF linear weight ``(out, in)`` to SD conv2d form ``(out, in, 1, 1)``.

    Fix: the mangled signature named the parameter ``__snake_case`` while the
    body read ``w`` (a NameError).
    """
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape , 1 , 1 )
def lowercase ( vae_state_dict ):
    """Rename an HF Diffusers VAE state dict to stable-diffusion keys and
    reshape the mid-block attention q/k/v/proj_out linear weights to conv2d.

    Fix: the mangled body used the undefined name ``_A`` in every
    ``v.replace`` and dropped the writes back into the dicts.
    """
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f'''mid.attn_1.{weight_name}.weight''' in k:
                print(f'''Reshaping {k} for SD format''' )
                # inlined (out, in) -> (out, in, 1, 1); the reshape helper in
                # this file is also mangled to the name `lowercase`.
                new_state_dict[k] = v.reshape(*v.shape, 1, 1)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# Pairs are (stable-diffusion key, HF Diffusers key).  Names restored: the
# dict comprehension below reads `textenc_conversion_lst`, the pattern reads
# `protected`, and the v2 converter reads `textenc_pattern` — all of which
# the mangled source left undefined by assigning everything to `__A`.
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ('resblocks.', 'text_model.encoder.layers.'),
    ('ln_1', 'layer_norm1'),
    ('ln_2', 'layer_norm2'),
    ('.c_fc.', '.fc1.'),
    ('.c_proj.', '.fc2.'),
    ('.attn', '.self_attn'),
    ('ln_final.', 'transformer.text_model.final_layer_norm.'),
    ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
    ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
# escaped-HF-name -> SD-name lookup, plus one regex matching any HF name
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def lowercase ( text_enc_dict ):
    """Convert an HF (OpenCLIP / SD v2.x) text-encoder state dict to
    stable-diffusion keys, merging separate q/k/v projections into single
    ``in_proj_weight`` / ``in_proj_bias`` tensors.

    Fix: the mangled body used lambdas whose parameter was named
    ``__snake_case`` while the body read ``m`` (NameError) and dropped the
    dict writes for the captured q/k/v tensors.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    # local q/k/v slot index so this function does not depend on the
    # (mangled) module-level constant
    code2idx = {"q": 0, "k": 1, "v": 2}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('''.self_attn.q_proj.weight''' )
            or k.endswith('''.self_attn.k_proj.weight''' )
            or k.endswith('''.self_attn.v_proj.weight''' )
        ):
            k_pre = k[: -len('''.q_proj.weight''' )]
            k_code = k[-len('''q_proj.weight''' )]  # single char: 'q'/'k'/'v'
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('''.self_attn.q_proj.bias''' )
            or k.endswith('''.self_attn.k_proj.bias''' )
            or k.endswith('''.self_attn.v_proj.bias''' )
        ):
            k_pre = k[: -len('''.q_proj.bias''' )]
            k_code = k[-len('''q_proj.bias''' )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
def lowercase ( text_enc_dict ):
    """Identity conversion for SD v1.x text encoders (no renaming needed).

    Fix: the mangled signature named the parameter ``__snake_case`` while the
    body returned ``text_enc_dict`` (a NameError).
    """
    return text_enc_dict
if __name__ == "__main__":
    # CLI driver: convert an HF Diffusers checkpoint directory into a single
    # stable-diffusion checkpoint.  Local names restored — the mangled source
    # assigned everything to `__A` while reading `parser`, `args`, etc.
    parser = argparse.ArgumentParser()

    parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
    vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
    text_enc_path = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='''cpu''')
    else:
        unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
        unet_state_dict = torch.load(unet_path, map_location='''cpu''')

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='''cpu''')
    else:
        vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
        vae_state_dict = torch.load(vae_path, map_location='''cpu''')

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='''cpu''')
    else:
        text_enc_path = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
        text_enc_dict = torch.load(text_enc_path, map_location='''cpu''')

    # Convert the UNet model
    # NOTE(review): the converter defs earlier in this file are all mangled to
    # the name `lowercase`; these call names match the intended originals and
    # must be restored on the defs for this script to run end-to-end.
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 33 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Module-level constants.  Names restored: the tokenizer class below reads
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and `logger`, but the mangled
# source assigned everything to `lowerCAmelCase`.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16_384,
}
class a__( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) LED tokenizer.

    Fixes over the mangled original: the base class referenced the undefined
    name ``lowerCamelCase__`` (``PreTrainedTokenizerFast`` is imported above);
    ``__init__`` and several methods declared many parameters all named
    ``__snake_case`` (a SyntaxError); the ``@mask_token.setter`` decorator
    required a property actually named ``mask_token``; and overriding methods
    that call ``super()._batch_encode_plus`` / ``super()._pad`` must carry
    those names to act as overrides.  Class-attribute names follow the
    standard ``PreTrainedTokenizerFast`` contract — confirm against the
    upstream LED tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        # Sync the backend pre-tokenizer's add_prefix_space with the argument.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type') )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )

            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type') )
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self):
        """Return the mask token string, or None (with a log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Wrap raw strings so the mask token eats the space before it
        # (lstrip=True), matching the slow tokenizer's behavior.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('is_split_into_words', False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.')

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('is_split_into_words', False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.')

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the backend tokenizer model files; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """<s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output

        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None):
        """LED does not use token types: return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]

    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'] )

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )

        return encoded_inputs
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
# Names restored: `get_cached_module_file` below reads
# `COMMUNITY_PIPELINES_URL` and `logger`, but the mangled source assigned
# both to `lowerCAmelCase`.
COMMUNITY_PIPELINES_URL = (
    'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _a ( ):
    """Return all diffusers releases published on PyPI, sorted by version.

    Fix: the mangled body passed the undefined name ``SCREAMING_SNAKE_CASE``
    to ``urlopen``/``sorted``; the URL local is restored.
    NOTE(review): every def in this module is mangled to ``_a`` and shadows
    the previous one — the intended public names must be restored file-wide.
    """
    url = '''https://pypi.org/pypi/diffusers/json'''
    releases = json.loads(request.urlopen(url).read() )['''releases'''].keys()
    return sorted(releases, key=lambda ver: version.Version(ver) )
def _a ( ):
    """Create the HF dynamic-modules cache directory, put it on ``sys.path``
    and make it an importable package.

    Fix: the mangled body used the undefined name ``SCREAMING_SNAKE_CASE``
    wherever the project constant ``HF_MODULES_CACHE`` (imported above)
    belongs.
    """
    # Already initialized if the cache dir is on the path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def _a ( name ):
    """Create (recursively) the dynamic module ``name`` inside the modules cache.

    Fix: the mangled body read the undefined local ``name``; the parameter is
    renamed accordingly, and the recursion calls this function itself.
    """
    init_hf_modules()  # NOTE(review): sibling defs in this file are mangled to `_a`; this name must exist at runtime
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        _a(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
# Imports of the form `import .xxx`
lowercase__ = re.findall('''^\s*import\s+\.(\S+)\s*$''' , SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Unique-ify
return list(set(SCREAMING_SNAKE_CASE ) )
def _a ( module_file ):
    """Return every file transitively relative-imported by ``module_file``.

    Fix: the mangled body scanned the root file on every pass (undefined
    ``SCREAMING_SNAKE_CASE`` placeholders); the per-file loop variable and
    list-update targets are restored.
    """
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            # NOTE(review): `get_relative_imports` is the intended name of a
            # sibling def in this file (currently also mangled to `_a`).
            new_imports.extend(get_relative_imports(f) )

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f'{f}.py' for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def _a ( filename ):
    """Check that every top-level package imported by ``filename`` is
    installed; raise ImportError listing the missing ones.  Returns the
    file's relative imports.

    Fix: the mangled body ran ``re.findall`` on the *filename* instead of the
    file's content and dropped several assignment targets.
    """
    with open(filename, '''r''', encoding='''utf-8''' ) as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]

    # Unique-ify and test we got them all
    imports = list(set(imports) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            f'{", ".join(missing_packages )}. Run `pip install {" ".join(missing_packages )}`' )

    # NOTE(review): `get_relative_imports` is the intended name of a sibling
    # def in this file (currently also mangled to `_a`).
    return get_relative_imports(filename)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = module_path.replace(os.path.sep , '''.''' )
lowercase__ = importlib.import_module(SCREAMING_SNAKE_CASE )
if class_name is None:
return find_pipeline_class(SCREAMING_SNAKE_CASE )
return getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _a ( loaded_module ):
    """Return the unique ``DiffusionPipeline`` subclass defined in
    ``loaded_module``; raise if more than one is found.

    Fix: the mangled body tested ``issubclass(cls, SCREAMING_SNAKE_CASE)``
    (undefined) where the pipeline base class belongs, and the f-string
    already names the parameter ``loaded_module``.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass) )

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('''.''' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    f' {loaded_module}.' )
            pipeline_class = cls

    return pipeline_class
def _a ( pretrained_model_name_or_path , module_file , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , ):
    """Resolve ``module_file`` (local path, community pipeline on GitHub, or
    a repo on the HF Hub), copy it and its relative imports into the dynamic
    modules cache, and return its path relative to that cache.

    Fix: the mangled signature declared all nine parameters as
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError); parameter and local names are
    restored from how the body uses them.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = '''local'''
    elif pretrained_model_name_or_path.count('''/''' ) == 0:
        # A bare name refers to a community pipeline hosted on GitHub.
        available_versions = _a_get_versions() if False else get_diffusers_versions()
        # cut ".dev0"
        latest_version = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(f'Defaulting to latest_version: {revision}.' )
        elif revision in available_versions:
            revision = f'v{revision}'
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                f' {", ".join(available_versions + ["main"] )}.' )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = '''git'''
            module_file = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join('''local''', '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise

    # Check we have all the requirements in our environment
    # NOTE(review): `check_imports` / `create_dynamic_module` are intended
    # names of sibling defs in this file (currently also mangled to `_a`).
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                _a(
                    pretrained_model_name_or_path, f'{module_needed}.py', cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
def _a ( pretrained_model_name_or_path , module_file , class_name = None , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    """Download/cache a dynamic module file and return the requested class from it.

    Fixes for the mangled original: every parameter shared one duplicated
    (syntactically invalid) name, and the cached-module path was bound to
    `lowercase__` while the next line read `final_module`.

    Args:
        pretrained_model_name_or_path: repo id or local directory holding the module.
        module_file: name of the ``.py`` file inside that repo.
        class_name: name of the class to extract from the cached module.
        cache_dir / force_download / resume_download / proxies / use_auth_token /
        revision / local_files_only: forwarded verbatim to ``get_cached_module_file``.
        **kwargs: accepted for API compatibility; unused here.

    Returns:
        The class object named ``class_name`` defined in the cached module.
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    # Strip the ".py" suffix so the module can be addressed by import path.
    return get_class_in_module(class_name , final_module.replace('''.py''' , '''''' ) )
| 371 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCAmelCase = 'Create a default config file for Accelerate with only a few flags set.'
def _a ( mixed_precision="no" , save_location = default_json_config_file , use_xpu = False ):
    """Write a minimal Accelerate ``ClusterConfig`` JSON file with auto-detected devices.

    Fixes for the mangled original: the three parameters shared one duplicated
    (syntactically invalid) name, and every assignment went to ``lowercase__``
    while later lines read ``path`` / ``config`` / ``num_gpus`` / etc.

    Args:
        mixed_precision: one of ``'no'``, ``'fp16'``, ``'bf16'``, ``'fp8'``.
        save_location: where to write the JSON config (defaults to the accelerate cache).
        use_xpu: opt in to Intel XPU detection when no CUDA device is present.

    Returns:
        The ``Path`` the config was written to, or ``False`` if a config already existed.

    Raises:
        ValueError: if ``mixed_precision`` is not one of the accepted values.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True , exist_ok=True )
    if path.exists():
        # Never clobber an existing user configuration.
        print(
            f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
    config = {
        '''compute_environment''': '''LOCAL_MACHINE''',
        '''mixed_precision''': mixed_precision,
    }
    # Pick the accelerator backend by availability: CUDA > XPU (opt-in) > NPU > CPU.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['''num_processes'''] = num_gpus
        config['''use_cpu'''] = False
        if num_gpus > 1:
            config['''distributed_type'''] = '''MULTI_GPU'''
        else:
            config['''distributed_type'''] = '''NO'''
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['''num_processes'''] = num_xpus
        config['''use_cpu'''] = False
        if num_xpus > 1:
            config['''distributed_type'''] = '''MULTI_XPU'''
        else:
            config['''distributed_type'''] = '''NO'''
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['''num_processes'''] = num_npus
        config['''use_cpu'''] = False
        if num_npus > 1:
            config['''distributed_type'''] = '''MULTI_NPU'''
        else:
            config['''distributed_type'''] = '''NO'''
    else:
        num_gpus = 0
        config['''use_cpu'''] = True
        config['''num_processes'''] = 1
        config['''distributed_type'''] = '''NO'''
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
def _a ( parser , parents ):
    """Register the ``default`` subcommand (``accelerate config default``) on *parser*.

    Fixes for the mangled original: both parameters shared one duplicated name
    and the subparser was bound to ``lowercase__`` while later lines read
    ``parser``; the ``help`` / ``formatter_class`` / ``default`` / ``func``
    arguments all degenerated to the same mangled token.

    Args:
        parser: the ``add_subparsers()`` action to register on.
        parents: parent parsers whose arguments are inherited.

    Returns:
        The configured subcommand parser.
    """
    # `lowerCAmelCase` is this module's (mangled) description constant.
    parser = parser.add_parser('''default''' , parents=parents , help=lowerCAmelCase , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '''--config_file''' , default=default_json_config_file , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , dest='''save_location''' , )
    parser.add_argument(
        '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=str , help='''Whether or not to use mixed precision training. '''
        '''Choose between FP16 and BF16 (bfloat16) training. '''
        '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
    # NOTE(review): in this name-mangled module the command callback is the last
    # `def _a` below; the global `_a` resolves to it when this runs at CLI time.
    # Confirm the intended target (`default_config_command`) when un-mangling.
    parser.set_defaults(func=_a )
    return parser
def _a ( args ):
    """Handle ``accelerate config default``: write the basic config and report where.

    Fixes for the mangled original: the parameter was mangled away from ``args``
    and the return value was bound to ``lowercase__`` while the next line read
    ``config_file``.

    Args:
        args: parsed CLI namespace with ``mixed_precision`` and ``save_location``.
    """
    # NOTE(review): `write_basic_config` is defined earlier in this module under
    # the mangled name `_a`; the call keeps the name the original line already
    # used -- confirm the binding when un-mangling this file.
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(f'accelerate configuration saved at {config_file}' )
| 93 | 0 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( lowerCamelCase_ , unittest.TestCase ):
    """Tests for the slow and fast Barthez (mBARThez) tokenizers.

    NOTE(review): identifiers in this file look machine-mangled. The base class
    ``lowerCamelCase_`` (presumably ``TokenizerTesterMixin``) and the recurring
    argument ``lowerCAmelCase__`` are never defined in the visible source, and
    several ``SCREAMING_SNAKE_CASE`` assignment targets no longer match the
    names read on the following lines (``tokenizer``, ``vocab_keys``,
    ``batch``...). Confirm against the un-mangled original before relying on it.
    """

    # Tokenizer classes exercised by the common mixin.
    SCREAMING_SNAKE_CASE_ : Optional[Any] = BarthezTokenizer
    SCREAMING_SNAKE_CASE_ : Dict = BarthezTokenizerFast
    SCREAMING_SNAKE_CASE_ : List[Any] = True
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = True

    def __A ( self ) -> Dict:
        """Download the pretrained fast tokenizer once and serialize it (both formats) into the temp dir."""
        super().setUp()
        SCREAMING_SNAKE_CASE = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = tokenizer

    def __A ( self ) -> Any:
        """``<pad>`` must round-trip to id 1 and back."""
        SCREAMING_SNAKE_CASE = '<pad>'
        SCREAMING_SNAKE_CASE = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )

    def __A ( self ) -> Dict:
        """Spot-check special tokens at both ends of the vocab and the vocab size."""
        SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(lowerCAmelCase__ ) , 101_122 )

    def __A ( self ) -> List[str]:
        self.assertEqual(self.get_tokenizer().vocab_size , 101_122 )

    @require_torch
    def __A ( self ) -> str:
        """Batch-encode two sentences to PyTorch tensors and verify ids and shapes."""
        SCREAMING_SNAKE_CASE = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        SCREAMING_SNAKE_CASE = [0, 57, 3_018, 70_307, 91, 2]
        SCREAMING_SNAKE_CASE = self.tokenizer(
            lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        SCREAMING_SNAKE_CASE = batch.input_ids.tolist()[0]
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )

    def __A ( self ) -> Union[str, Any]:
        """Slow and fast tokenizers must agree on tokens and on ids (with and without special tokens)."""
        if not self.test_rust_tokenizer:
            return
        SCREAMING_SNAKE_CASE = self.get_tokenizer()
        SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE = 'I was born in 92000, and this is falsé.'
        SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )

    @slow
    def __A ( self ) -> str:
        """Full integration check against a pinned revision of moussaKam/mbarthez."""
        # fmt: off
        SCREAMING_SNAKE_CASE = {'input_ids': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        SCREAMING_SNAKE_CASE = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase__ , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=lowerCAmelCase__ , )
| 113 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__UpperCamelCase = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class lowerCAmelCase ( unittest.TestCase ):
    """Checks that each `examples/by_feature/*.py` script stays in sync with the
    canonical `complete_*_example.py` scripts.

    NOTE(review): identifiers look machine-mangled -- arguments named
    ``lowerCAmelCase__`` and several ``SCREAMING_SNAKE_CASE`` assignment targets
    do not match the names used afterwards (``item_path``, ``parser_only``,
    ``special_strings``, ``diff``). Confirm against the un-mangled original.
    """

    def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None ) -> int:
        # Diff every feature script (minus exclusions) against the complete
        # example and require the remaining difference, after stripping
        # known-benign `special_strings`, to be empty.
        SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
        SCREAMING_SNAKE_CASE = os.path.abspath('examples' )
        for item in os.listdir(lowerCAmelCase__ ):
            if item not in EXCLUDE_EXAMPLES:
                SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
                if os.path.isfile(lowerCAmelCase__ ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=lowerCAmelCase__ , feature_script=lowerCAmelCase__ , tested_section='main()' if parser_only else 'training_function()' , ):
                        SCREAMING_SNAKE_CASE = compare_against_test(
                            os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
                        SCREAMING_SNAKE_CASE = '\n'.join(lowerCAmelCase__ )
                        if special_strings is not None:
                            for string in special_strings:
                                SCREAMING_SNAKE_CASE = diff.replace(lowerCAmelCase__ , '' )
                        self.assertEqual(lowerCAmelCase__ , '' )

    def __A ( self ) -> Optional[int]:
        # NLP example: check both the parser-only and the training sections.
        self.one_complete_example('complete_nlp_example.py' , lowerCAmelCase__ )
        self.one_complete_example('complete_nlp_example.py' , lowerCAmelCase__ )

    def __A ( self ) -> Optional[int]:
        # CV example: tracking/logging lines below are expected to differ and
        # are stripped from the diff before asserting emptiness.
        SCREAMING_SNAKE_CASE = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
        SCREAMING_SNAKE_CASE = [
            ' ' * 16 + '{\n\n',
            ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            ' ' * 20 + '"f1": eval_metric["f1"],\n\n',
            ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            ' ' * 20 + '"epoch": epoch,\n\n',
            ' ' * 16 + '},\n\n',
            ' ' * 16 + 'step=epoch,\n',
            ' ' * 12,
            ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
        ]
        self.one_complete_example('complete_cv_example.py' , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
        self.one_complete_example('complete_cv_example.py' , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class lowerCAmelCase ( lowerCamelCase_ ):
    """End-to-end ``accelerate launch`` runs of the feature example scripts,
    using mocked dataloaders and a shared temporary accelerate config file.

    NOTE(review): the base class ``lowerCamelCase_`` (presumably
    ``TempDirTestCase``) and the ``lowerCAmelCase__`` placeholders are undefined
    in the visible source; mangled identifiers -- confirm upstream.
    """

    SCREAMING_SNAKE_CASE_ : List[str] = False  # presumably "clear temp dir between tests" -- TODO confirm

    @classmethod
    def __A ( cls ) -> List[str]:
        # Write a default accelerate config once per class and build the shared
        # `accelerate launch` argument prefix.
        super().setUpClass()
        SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE = os.path.join(cls._tmpdir , 'default_config.yml' )
        write_basic_config(save_location=cls.configPath )
        SCREAMING_SNAKE_CASE = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def __A ( cls ) -> Dict:
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )

    def __A ( self ) -> int:
        # Checkpoint every epoch -> an `epoch_0` folder must exist afterwards.
        SCREAMING_SNAKE_CASE = F'\n    examples/by_feature/checkpointing.py\n    --checkpointing_steps epoch\n    --output_dir {self.tmpdir}\n    '.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )

    def __A ( self ) -> List[Any]:
        # Checkpoint every step -> a `step_2` folder must exist afterwards.
        SCREAMING_SNAKE_CASE = F'\n    examples/by_feature/checkpointing.py\n    --checkpointing_steps 1\n    --output_dir {self.tmpdir}\n    '.split()
        SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )

    def __A ( self ) -> Union[str, Any]:
        # Resuming from the epoch checkpoint must skip epoch 0 in the logs.
        SCREAMING_SNAKE_CASE = F'\n    examples/by_feature/checkpointing.py\n    --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n    '.split()
        SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase__ )
        self.assertNotIn('epoch 0:' , lowerCAmelCase__ )
        self.assertIn('epoch 1:' , lowerCAmelCase__ )

    def __A ( self ) -> int:
        # Resuming from a step checkpoint: epoch 0 reappears only when running
        # on a single process (the resumed step falls inside epoch 0).
        SCREAMING_SNAKE_CASE = F'\n    examples/by_feature/checkpointing.py\n    --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n    '.split()
        SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase__ )
        if torch.cuda.is_available():
            SCREAMING_SNAKE_CASE = torch.cuda.device_count()
        else:
            SCREAMING_SNAKE_CASE = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:' , lowerCAmelCase__ )
            self.assertIn('epoch 1:' , lowerCAmelCase__ )
        else:
            self.assertIn('epoch 0:' , lowerCAmelCase__ )
            self.assertIn('epoch 1:' , lowerCAmelCase__ )

    @slow
    def __A ( self ) -> Optional[Any]:
        # Cross-validation run (real dataloaders) must reach >= 75% accuracy;
        # the last accuracy dict printed to stdout is parsed with literal_eval.
        SCREAMING_SNAKE_CASE = '\n    examples/by_feature/cross_validation.py\n    --num_folds 2\n    '.split()
        with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
            SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase__ )
            SCREAMING_SNAKE_CASE = re.findall('({.+})' , lowerCAmelCase__ )
            SCREAMING_SNAKE_CASE = [r for r in results if 'accuracy' in r][-1]
            SCREAMING_SNAKE_CASE = ast.literal_eval(lowerCAmelCase__ )
            self.assertGreaterEqual(results['accuracy'] , 0.75 )

    def __A ( self ) -> str:
        SCREAMING_SNAKE_CASE = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs )

    @require_trackers
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def __A ( self ) -> Union[str, Any]:
        # The tracking example must create a `tracking` project directory.
        with tempfile.TemporaryDirectory() as tmpdir:
            SCREAMING_SNAKE_CASE = F'\n    examples/by_feature/tracking.py\n    --with_tracking\n    --project_dir {tmpdir}\n    '.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , 'tracking' ) ) )

    def __A ( self ) -> Dict:
        SCREAMING_SNAKE_CASE = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs )

    def __A ( self ) -> List[Any]:
        SCREAMING_SNAKE_CASE = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs )
| 113 | 1 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class _a ( unittest.TestCase ):
    """Builds tiny BigBird configs and dummy inputs for the Flax model tests.

    NOTE(review): identifiers are machine-mangled -- every ``__init__``
    parameter is named ``A`` (duplicate-argument SyntaxError as written) and
    the ``SCREAMING_SNAKE_CASE`` assignment targets no longer carry the
    attribute names read elsewhere (presumably ``FlaxBigBirdModelTester`` in
    the un-mangled original). Confirm before use.
    """

    def __init__( self, A, A=2, A=56, A=True, A=True, A=True, A=True, A=99, A=32, A=2, A=2, A=7, A="gelu_new", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=4, A="block_sparse", A=True, A=False, A=2, A=3, ):
        """Store the (tiny) model hyperparameters used by every test."""
        SCREAMING_SNAKE_CASE : Tuple = parent
        SCREAMING_SNAKE_CASE : int = batch_size
        SCREAMING_SNAKE_CASE : List[str] = seq_length
        SCREAMING_SNAKE_CASE : Optional[Any] = is_training
        SCREAMING_SNAKE_CASE : Dict = use_attention_mask
        SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
        SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
        SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
        SCREAMING_SNAKE_CASE : int = hidden_size
        SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
        SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
        SCREAMING_SNAKE_CASE : Dict = intermediate_size
        SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
        SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : Any = max_position_embeddings
        SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
        SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
        SCREAMING_SNAKE_CASE : List[Any] = initializer_range
        SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
        SCREAMING_SNAKE_CASE : int = rescale_embeddings
        SCREAMING_SNAKE_CASE : Any = attention_type
        SCREAMING_SNAKE_CASE : str = use_bias
        SCREAMING_SNAKE_CASE : Tuple = block_size
        SCREAMING_SNAKE_CASE : List[Any] = num_random_blocks

    def UpperCamelCase_ ( self ):
        """Create random input ids / masks and a matching ``BigBirdConfig``."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        SCREAMING_SNAKE_CASE : Union[str, Any] = None
        if self.use_attention_mask:
            SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE : Optional[Any] = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        SCREAMING_SNAKE_CASE : List[str] = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=A, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )
        return config, input_ids, token_type_ids, attention_mask

    def UpperCamelCase_ ( self ):
        """Return ``(config, inputs_dict)`` in the shape the common test mixin expects."""
        SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
        SCREAMING_SNAKE_CASE : List[str] = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_flax
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Flax BigBird test-suite: runs the common Flax model tests (several
    overridden only to mark them slow, as BigBird is much heavier than other
    models) plus a jit/non-jit equivalence check.

    NOTE(review): the base class ``SCREAMING_SNAKE_CASE`` is mangled
    (presumably ``FlaxModelTesterMixin``) and is undefined in the visible
    source; confirm against the un-mangled original.
    """

    # All Flax BigBird head classes under test (empty tuple when Flax is absent).
    A : str = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    A : List[Any] = False
    A : List[str] = False

    def UpperCamelCase_ ( self ):
        """Instantiate the shared model tester."""
        SCREAMING_SNAKE_CASE : Optional[Any] = FlaxBigBirdModelTester(self )

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def UpperCamelCase_ ( self ):
        """Defer to the common save/load round-trip test (slow for BigBird)."""
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def UpperCamelCase_ ( self ):
        """Defer to the common no-auto-init from_pretrained test (slow for BigBird)."""
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def UpperCamelCase_ ( self ):
        """Defer to the common no-automatic-init test (slow for BigBird)."""
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def UpperCamelCase_ ( self ):
        """Defer to the common hidden-states-output test (slow for BigBird)."""
        super().test_hidden_states_output()

    @slow
    def UpperCamelCase_ ( self ):
        """Every head class must load from the published checkpoint."""
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name.from_pretrained('google/bigbird-roberta-base' )
            self.assertIsNotNone(A )

    def UpperCamelCase_ ( self ):
        """Attention-probability outputs are only checked when enabled for this suite."""
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def UpperCamelCase_ ( self ):
        """jitted and non-jitted forward passes must produce identical shapes."""
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(A, A )
                SCREAMING_SNAKE_CASE : List[str] = model_class(A )

                @jax.jit
                def model_jitted(A, A=None, **A ):
                    return model(input_ids=A, attention_mask=A, **A )

                with self.subTest('JIT Enabled' ):
                    SCREAMING_SNAKE_CASE : List[str] = model_jitted(**A ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        SCREAMING_SNAKE_CASE : Optional[Any] = model_jitted(**A ).to_tuple()
                self.assertEqual(len(A ), len(A ) )
                for jitted_output, output in zip(A, A ):
                    self.assertEqual(jitted_output.shape, output.shape )

    def UpperCamelCase_ ( self, A, A, A, A=1E-5, A="outputs", A=None ):
        """Skip attention tensors when comparing PT/Flax outputs; defer otherwise."""
        # `bigbird_block_sparse_attention` in flax != `big_bird_attention` in
        # torch, so attention outputs are deliberately not compared.
        if name.startswith('outputs.attentions' ):
            return
        else:
            super().check_pt_flax_outputs(A, A, A, A, A, A )
| 246 |
"""Kruskal's minimum-spanning-tree algorithm on an undirected weighted graph,
backed by a disjoint-set forest (union by rank + path compression).

Restored from a name-mangled original in which every assignment target
(`SCREAMING_SNAKE_CASE`), class name (`_a`), method name (`UpperCamelCase_`)
and the sort key (`lambda A : x[2]`) had been destroyed, leaving the module
full of NameErrors.
"""
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")
UpperCamelCase_ = T  # preserve the original module-level binding


class DisjointSetTreeNode(Generic[T]):
    """A node in the disjoint-set forest: stores its payload, parent link and rank."""

    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self  # each node starts as its own set representative
        self.rank = 0


class DisjointSetTree(Generic[T]):
    """Union-find over arbitrary hashable payloads."""

    def __init__(self) -> None:
        # payload -> its node in the forest
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        """Create a singleton set containing `data`."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        """Return the set representative of `data`, compressing paths on the way."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        """Attach the lower-rank representative under the higher-rank one."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        """Merge the sets containing `data1` and `data2`."""
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as a symmetric adjacency mapping."""

    def __init__(self) -> None:
        # node -> {neighbour: edge weight}
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        """Ensure `node` exists in the adjacency mapping."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge of the given weight (stored in both directions)."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        """Return this graph's minimum spanning tree as a new graph.

        Assumes the graph is connected (takes |V| - 1 edges).
        """
        # Collect each undirected edge once.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda edge: edge[2])  # cheapest edges first

        # One singleton set per vertex.
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # Greedily take the cheapest edge that joins two different components.
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph


# Preserve the original mangled binding: in the source file all three classes
# were named `_a`, so the name ended up bound to the last definition.
_a = GraphUndirectedWeighted
| 246 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-imported public API table for the OPT model family.
# NOTE(review): in the mangled original every assignment below targeted
# `__UpperCAmelCase` (each overwriting the last) and the `_LazyModule` result
# was discarded, leaving `_import_structure` undefined at line end; restored
# to the standard transformers lazy-init pattern.
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # PyTorch missing: expose no torch classes
else:
    _import_structure['modeling_opt'] = [
        'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OPTForCausalLM',
        'OPTModel',
        'OPTPreTrainedModel',
        'OPTForSequenceClassification',
        'OPTForQuestionAnswering',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # TensorFlow missing
else:
    _import_structure['modeling_tf_opt'] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # Flax missing
else:
    _import_structure['modeling_flax_opt'] = [
        'FlaxOPTForCausalLM',
        'FlaxOPTModel',
        'FlaxOPTPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the concrete imports.
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 |
import math
def is_prime(number: int) -> bool:
    """Return True iff `number` is prime (trial division by odd factors).

    Restored from a mangled original whose body referenced `number` while the
    parameter had been renamed away.

    Raises:
        AssertionError: if `number` is not a non-negative int.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the nearest prime >= factor * value (or <= it with desc=True).

    Restored from a mangled original that referenced `factor`, `value`,
    `kwargs` and `first_value_val` while all bindings had been renamed away.
    Pass ``desc=True`` to search downwards instead of upwards.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            # Wrapped back to the start: restart just above it.
            return next_prime(value + 1, **kwargs)
    return value


# Preserve the original mangled binding: both functions shared one name in the
# source file, so it ended up bound to the last definition.
__SCREAMING_SNAKE_CASE = next_prime
| 325 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Backwards-compatibility shim: warn once at import time and point users at
# the new `diffusers` import locations for the controlnet pipeline classes.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
) | 196 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> list:
    """Sort a list in place with odd-even (brick) transposition sort.

    Alternates compare-and-swap passes over the (even, even+1) index pairs and
    the (odd, odd+1) index pairs until a full round makes no swap.

    Fix for the mangled original: the sort sentinel and the list were read under
    names (`is_sorted`, `input_list`) that were never bound.

    Args:
        __lowerCAmelCase: the list to sort (mutated in place).

    Returns:
        The same list, sorted in ascending order.
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(__lowerCAmelCase ) - 1 , 2 ):  # iterating over all even indices
            if __lowerCAmelCase[i] > __lowerCAmelCase[i + 1]:
                # swapping if elements not in order
                __lowerCAmelCase[i], __lowerCAmelCase[i + 1] = __lowerCAmelCase[i + 1], __lowerCAmelCase[i]
                is_sorted = False
        for i in range(1 , len(__lowerCAmelCase ) - 1 , 2 ):  # iterating over all odd indices
            if __lowerCAmelCase[i] > __lowerCAmelCase[i + 1]:
                # swapping if elements not in order
                __lowerCAmelCase[i], __lowerCAmelCase[i + 1] = __lowerCAmelCase[i + 1], __lowerCAmelCase[i]
                is_sorted = False
    return __lowerCAmelCase


# Readable alias; also repairs the dangling `odd_even_sort(...)` call in the
# __main__ script below.
odd_even_sort = SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Read one line of whitespace-separated integers, sort them, and print.
    print('''Enter list to be sorted''')
    lowerCamelCase : Any =[int(x) for x in input().split()]
    # inputing elements of the list in one line
    # NOTE(review): mangled names -- `input_list` below is never bound (the
    # parsed list was assigned to `lowerCamelCase`), and `odd_even_sort` refers
    # to the function defined above under a mangled name; confirm upstream.
    lowerCamelCase : Optional[int] =odd_even_sort(input_list)
    print('''The sorted list is''')
print(sorted_list) | 196 | 1 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()

# Conversion registry. Each entry maps a model-type key to the objects the
# converters below need: (config class, TF model class(es)..., PyTorch model
# class(es)..., pretrained archive map(s)...).
# NOTE(review): entry arity is not uniform — "bart" carries an extra TF head
# and "dpr" carries three encoder/reader pairs plus three archive lists — so
# code that tuple-unpacks a fixed arity only works for the regular entries.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}

# Backward-compatible alias for the previous (auto-generated) module-level name.
a_ = MODEL_CLASSES
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert one PyTorch checkpoint to a TensorFlow 2 ``.h5`` weight file.

    Args:
        model_type: Key into ``MODEL_CLASSES`` selecting the architecture.
        pytorch_checkpoint_path: Local path or shortcut name of the PyTorch checkpoint.
        config_file: Local path or shortcut name of the model configuration.
        tf_dump_path: Destination path for the TF weights (saved in h5 format).
        compare_with_pt_model: If True, run both models on dummy inputs and
            assert the outputs agree within 2e-2.
        use_cached_models: If True, reuse locally cached downloads instead of
            force-redownloading shortcut names.

    Raises:
        ValueError: If ``model_type`` is not a key of ``MODEL_CLASSES``.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    # Expose all hidden states / attentions so the PT/TF comparison sees them.
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Resolve a shortcut name to a local PyTorch checkpoint file if needed.
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert a batch of PyTorch checkpoints to TensorFlow 2 weight files.

    Args:
        args_model_type: Model-type key, or None to iterate over every key in
            ``MODEL_CLASSES``.
        tf_dump_path: Directory where the converted ``*-tf_model.h5`` files go.
        model_shortcut_names_or_path: Checkpoints to convert; defaults to every
            shortcut known for the model type.
        config_shortcut_names_or_path: Matching configs; defaults to the
            checkpoint names themselves.
        compare_with_pt_model: Forwarded to ``convert_pt_checkpoint_to_tf``.
        use_cached_models: If True, reuse cached downloads.
        remove_cached_files: If True, delete the downloaded config/weight files
            after each conversion (saves disk space in large batches).
        only_convert_finetuned_models: If True, convert only the finetuned
            ("-squad"/"-mrpc"/"-mnli") checkpoints; otherwise skip them.

    Raises:
        ValueError: If a model type is not a key of ``MODEL_CLASSES``.
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 1_00)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 1_00)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 1_00)
            # Finetuned checkpoints use the base model's config, so they are
            # converted only when explicitly requested.
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f" Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f" Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f" Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 1_00)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            # A local file path would make an ugly dump filename; use a stub.
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    # Command-line entry point: parse conversion options and run the batch
    # converter over the requested (or all) checkpoints.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
| 168 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File name expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Download locations of the SentencePiece model for the published checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

# Maximum input length (in tokens) for the published checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class a(PreTrainedTokenizer):  # corresponds to BigBirdTokenizer
    """SentencePiece-based slow tokenizer for BigBird models.

    Loads a ``spiece.model`` file and implements the standard slow-tokenizer
    hooks: tokenization, id/token conversion, decoding, special-token input
    building, and vocabulary saving.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Wrap plain-string special tokens so surrounding spaces are preserved.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it here and
        # reload it from the vocab file in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file to copy (e.g. loaded from serialized proto): re-serialize.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``[CLS] X [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
| 168 | 1 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Project Euler 71: the fraction immediately left of numerator/denominator.

    Scans every denominator b up to ``limit``, takes the largest numerator a
    with a/b strictly less than numerator/denominator, and keeps the overall
    best fraction using exact cross-multiplied integer comparison (no floats).

    Returns:
        The numerator of the best (largest) such fraction.
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        # Floor division lands exactly on numerator/denominator when b is a
        # multiple of denominator; step one back to stay strictly below it.
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # a/b > max_a/max_b  <=>  a*max_b > b*max_a  (all values non-negative)
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
| 177 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build a MobileNetV1 image processor in tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        # Fall back to the defaults the processor under test is expected to use.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for MobileNetVaImageProcessor: attribute presence, from_dict
    overrides, and PIL/numpy/torch input handling."""

    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # Scalar kwargs must be expanded into the canonical dict form.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def _expected_shape(self, batch_size):
        # (batch, channels, crop height, crop width) after resize + center crop.
        return (
            batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        )

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(1))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(self.image_processor_tester.batch_size))

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(1))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(self.image_processor_tester.batch_size))

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(1))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(self.image_processor_tester.batch_size))
| 177 | 1 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File names expected inside a saved PhoBERT tokenizer directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

# Download locations of the vocabulary/merges files for the published checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

# Maximum input lengths (in tokens) for the published checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a sequence of symbols (strings of variable length, e.g. a tuple
    produced during BPE). A word with fewer than two symbols yields an empty
    set.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _UpperCAmelCase(PreTrainedTokenizer):  # corresponds to PhobertTokenizer
    """PhoBERT tokenizer: BPE over pre-segmented Vietnamese text.

    Built from a ``vocab.txt`` word list and a ``bpe.codes`` merges file, with
    RoBERTa-style special tokens.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # Special tokens occupy the first vocabulary ids; the remainder of the
        # vocabulary is appended from the vocab file.
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        # Drop the trailing frequency column of each merge line.
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``<s> X </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids (all zeros: PhoBERT does not use them)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to *token*, returning "@@ "-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the final character with the end-of-word suffix.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Strip the trailing "</w>" marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split *text* on whitespace and BPE-encode each word."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the vocab and merges files into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a fairseq-style "<token> <count>" word list into the encoder.

        *f* may be a path (opened as UTF-8) or an already-open file object.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 332 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase__ ( unittest.TestCase ):
    """Test-suite for ``AutoTokenizer``: hub resolution, local loading,
    ``tokenizer_type`` overrides, custom slow/fast tokenizer registration,
    and ``trust_remote_code`` behavior.

    NOTE(review): local identifiers in this class look machine-mangled —
    results are repeatedly bound to ``SCREAMING_SNAKE_CASE__`` while later
    lines read undefined names such as ``__lowerCamelCase``, ``tokenizer``,
    ``tokenizera``, ``tokenizers``, ``tokenizer_names`` and ``counter``.
    As written, most methods would raise NameError at run time; confirm each
    against the upstream ``tests/models/auto/test_tokenization_auto.py``
    before trusting any assertion below.
    """

    def lowercase_ ( self : Optional[int] ) -> Tuple:
        # setUp-style reset; the stored value is never read back in this chunk.
        SCREAMING_SNAKE_CASE__ = 0

    @slow
    def lowercase_ ( self : List[str] ) -> Any:
        # Round-trips every known BERT (minus Japanese) and GPT-2 checkpoint
        # through AutoTokenizer and sanity-checks the resulting class.
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
            self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(__lowerCamelCase ) , 0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
            self.assertIsInstance(__lowerCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(__lowerCamelCase ) , 0 )

    def lowercase_ ( self : List[str] ) -> int:
        # Small BERT fixture: expects vocab of 12 tokens.
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
        self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def lowercase_ ( self : List[str] ) -> Dict:
        # Small RoBERTa fixture: expects vocab of 20 tokens.
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
        self.assertIsInstance(__lowerCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )

    def lowercase_ ( self : Dict ) -> Any:
        # Loading with an explicit config object should still resolve the tokenizer.
        SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
        self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
        # Check that tokenizer_type ≠ model_type
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , config=__lowerCamelCase )
        self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def lowercase_ ( self : Tuple ) -> Dict:
        # tokenizer_type override from local vocab files (slow tokenizers).
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' , use_fast=__lowerCamelCase )
            self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' , use_fast=__lowerCamelCase )
            self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )

    @require_tokenizers
    def lowercase_ ( self : Optional[int] ) -> Optional[int]:
        # Same as above without use_fast (defaults to fast tokenizers).
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowerCamelCase , '''vocab.txt''' ) )
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''bert''' )
            self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowerCamelCase , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowerCamelCase , '''merges.txt''' ) )
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , tokenizer_type='''gpt2''' )
            self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )

    def lowercase_ ( self : Union[str, Any] ) -> int:
        # Unknown tokenizer_type must raise.
        with pytest.raises(__lowerCamelCase ):
            AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )

    @require_tokenizers
    def lowercase_ ( self : Optional[int] ) -> Tuple:
        # Casing flag should be picked up from the hub tokenizer config.
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
            self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
            if isinstance(__lowerCamelCase , __lowerCamelCase ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowerCamelCase )
            else:
                self.assertEqual(tokenizer.do_lower_case , __lowerCamelCase )
            self.assertEqual(tokenizer.model_max_length , 512 )

    @require_tokenizers
    def lowercase_ ( self : Any ) -> str:
        # Nonexistent repo id must raise with a helpful message.
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                __lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
                SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )

    def lowercase_ ( self : List[str] ) -> Tuple:
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        SCREAMING_SNAKE_CASE__ = TOKENIZER_MAPPING.values()
        SCREAMING_SNAKE_CASE__ = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(__lowerCamelCase )

    @require_tokenizers
    def lowercase_ ( self : Optional[int] ) -> Any:
        # use_fast flag controls which tokenizer class is returned.
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__lowerCamelCase ) , __lowerCamelCase )
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , __lowerCamelCase )

    @require_tokenizers
    def lowercase_ ( self : List[Any] ) -> Optional[Any]:
        # do_lower_case=False on an uncased vocab maps cased words to [UNK].
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=__lowerCamelCase )
        SCREAMING_SNAKE_CASE__ = '''Hello, world. How are you?'''
        SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
        self.assertEqual('''[UNK]''' , tokens[0] )
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=__lowerCamelCase )
        SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__lowerCamelCase )
        self.assertEqual('''[UNK]''' , tokens[0] )

    @require_tokenizers
    def lowercase_ ( self : Dict ) -> int:
        # PreTrainedTokenizerFast loaded purely from a tokenizer.json + model config.
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
        self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase )
        self.assertEqual(tokenizer.model_max_length , 512 )
        self.assertEqual(tokenizer.vocab_size , 3_0000 )
        self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
        self.assertEqual(tokenizer.padding_side , '''right''' )
        self.assertEqual(tokenizer.truncation_side , '''right''' )

    def lowercase_ ( self : List[Any] ) -> Optional[int]:
        # save_pretrained / from_pretrained round-trip preserves class and vocab.
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
        self.assertIsInstance(__lowerCamelCase , (BertTokenizer, BertTokenizerFast) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(__lowerCamelCase )
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
        self.assertIsInstance(__lowerCamelCase , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 12 )

    def lowercase_ ( self : Optional[int] ) -> Any:
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''ctrl''' )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )

    def lowercase_ ( self : List[Any] ) -> Optional[int]:
        # Check we can load the tokenizer config of an online model.
        SCREAMING_SNAKE_CASE__ = get_tokenizer_config('''bert-base-cased''' )
        SCREAMING_SNAKE_CASE__ = config.pop('''_commit_hash''' , __lowerCamelCase )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(__lowerCamelCase , {'''do_lower_case''': False} )
        # This model does not have a tokenizer_config so we get back an empty dict.
        SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
        self.assertDictEqual(__lowerCamelCase , {} )
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(__lowerCamelCase )
            SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__lowerCamelCase )
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )

    def lowercase_ ( self : int ) -> str:
        # Register a custom config + slow tokenizer, then clean the registries up.
        try:
            AutoConfig.register('''custom''' , __lowerCamelCase )
            AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__lowerCamelCase ):
                AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
            SCREAMING_SNAKE_CASE__ = CustomTokenizer.from_pretrained(__lowerCamelCase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(__lowerCamelCase )
                SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
                self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
        finally:
            # Always undo registry mutation so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def lowercase_ ( self : List[Any] ) -> List[Any]:
        # Register slow + fast tokenizers (two-step and one-step) for a custom config.
        try:
            AutoConfig.register('''custom''' , __lowerCamelCase )
            # Can register in two steps
            AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                __lowerCamelCase , slow_tokenizer_class=__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__lowerCamelCase ):
                AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                SCREAMING_SNAKE_CASE__ = BertTokenizerFast.from_pretrained(__lowerCamelCase )
                bert_tokenizer.save_pretrained(__lowerCamelCase )
                SCREAMING_SNAKE_CASE__ = CustomTokenizerFast.from_pretrained(__lowerCamelCase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(__lowerCamelCase )
                SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
                self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
                SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , use_fast=__lowerCamelCase )
                self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def lowercase_ ( self : Dict ) -> Tuple:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(__lowerCamelCase ):
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__lowerCamelCase ):
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(__lowerCamelCase )
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            # Test we can also load the slow version
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(__lowerCamelCase )
                SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )

    @require_tokenizers
    def lowercase_ ( self : List[str] ) -> str:
        # Local custom tokenizers vs hub remote code: trust_remote_code decides
        # which implementation wins.
        class UpperCAmelCase__ ( A__ ):
            """simple docstring"""

            a = False

        class UpperCAmelCase__ ( A__ ):
            """simple docstring"""

            a = NewTokenizer
            a = False

        try:
            AutoConfig.register('''custom''' , __lowerCamelCase )
            AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase )
            AutoTokenizer.register(__lowerCamelCase , fast_tokenizer_class=__lowerCamelCase )
            # If remote code is not set, the default is to use local
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowerCamelCase )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertTrue(tokenizer.special_attribute_present )
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def lowercase_ ( self : Dict ) -> List[str]:
        # Legacy remote-code repo layout still loads.
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            # Test we can also load the slow version
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )

    def lowercase_ ( self : Union[str, Any] ) -> Dict:
        # Invalid repo id error message.
        with self.assertRaisesRegex(
            __lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''bert-base''' )

    def lowercase_ ( self : Dict ) -> Optional[int]:
        # Invalid revision error message.
        with self.assertRaisesRegex(
            __lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' )

    def lowercase_ ( self : Any ) -> Optional[Any]:
        # Make sure we have cached the tokenizer.
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
| 314 | 0 |
"""simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2) -> dict:
    """Build and simulate a GHZ-style entangled state on ``qubits`` qubits.

    Args:
        qubits: number of qubits (and classical bits) in the circuit.

    Returns:
        Measurement counts from 1000 simulator shots; for an entangled state
        only the all-zeros and all-ones bitstrings appear.

    Fixes vs. original: the body referenced undefined names (``qubits``,
    ``job``), wired CNOTs to the qubit *count* instead of index ``i``, and
    executed the integer instead of the circuit; the ``__main__`` guard also
    called an undefined ``quantum_entanglement`` — the function is now named
    to match its caller.
    """
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , qubits ):
        # Adding CX (CNOT) gate: entangle qubit i with its predecessor.
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(classical_bits ) ) )
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    return job.result().get_counts(circuit )


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
| 316 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( TrainingArguments ):
    """Sequence-to-sequence training arguments.

    Extends ``TrainingArguments`` with generation-time options consumed when
    ``predict_with_generate=True``.

    Fixes vs. original: the base class and field defaults referenced the
    undefined name ``snake_case_``, all five fields shared the duplicate
    name ``lowerCamelCase__`` (later definitions silently shadow earlier
    ones in a dataclass), and ``to_dict`` read undefined locals. Field names
    are restored from their ``metadata['help']`` texts.
    """

    # Whether to use the length-grouped SortishSampler during training.
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    # Run `generate` during evaluation so ROUGE/BLEU-style metrics can be computed.
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    generation_max_length: Optional[int] = field(
        default=None ,
        metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None ,
        metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None ,
        metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )

    def a__ (self ):
        """Serialize to a dict, converting any nested GenerationConfig values
        to plain dicts so the result is JSON-serializable."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig ):
                d[k] = v.to_dict()
        return d
| 316 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy import machinery for the TrOCR sub-package.
# Fixes vs. original: the structure dict was bound to a mangled name while
# `_LazyModule` received the undefined `_import_structure`, the torch-only
# modeling symbols were never registered in the dict, and the lazy module was
# assigned to a throwaway variable instead of replacing this module in
# `sys.modules`.
_import_structure = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}

# Register the torch-backed objects only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_trocr"""] = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 261 | """simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) kernel arrays of one T5X attention layer.

    Args:
        params: flattened T5X parameter dict keyed by '/'-joined paths.
        i: layer (block) index.
        prefix: 'encoder' or 'decoder'.
        layer_name: attention sub-module name within the block.

    Fixes vs. original: all four parameters were named ``a`` (a SyntaxError),
    and the def name did not match the ``tax_attention_lookup`` call sites.
    """
    k = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) kernel arrays of one T5X MLP block.

    For v1.1 checkpoints (gated GeLU) ``split_mlp_wi`` is True and ``wi`` is
    the tuple ``(wi_0, wi_1)``; otherwise it is the single ``wi`` kernel.

    Fixes vs. original: duplicate ``a`` parameter names (a SyntaxError) and a
    def name that did not match the ``tax_mlp_lookup`` call sites.
    """
    if split_mlp_wi:
        wi_a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_b = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_a, wi_b)
    else:
        wi = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale array of one T5X block.

    Fixes vs. original: all four parameters were named ``a`` (a SyntaxError)
    and the def name did not match the ``tax_layer_norm_lookup`` call sites.
    """
    return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
# NOTE(review): this converter is machine-mangled beyond safe repair in place.
# The signature uses the duplicate parameter name 'a' three times (a
# SyntaxError in Python), the body reads names that are never bound
# ('variables', 'old', 'num_layers' via 'a', 'split_mlp_wi', 'layer_norm',
# 'k'/'o'/'q'/'v', 'wi'/'wo', 'is_encoder_only', 'new'), and every
# destination dict key (the PyTorch state-dict entry each '__a = ...' line
# was meant to populate) has been lost. Restore from the upstream
# transformers T5X conversion script (convert_t5x_checkpoint_to_pytorch.py):
# the intended signature is
#   convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only)
# and the intended behavior is: flatten the T5X 'target' tree, copy shared
# embeddings, then per encoder/decoder block copy layer norms, attention
# (k/o/q/v, transposed), and MLP kernels into a new OrderedDict of PyTorch
# parameter names, plus relative-position bias, final layer norms, and an
# optional v1.1 lm_head.
def _lowerCamelCase( a , *, a , a ):
    # Flatten the nested T5X param tree into '/'-joined keys.
    __a = traverse_util.flatten_dict(variables["target"] )
    __a = {"/".join(a ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    __a = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:" , a )
    __a = collections.OrderedDict()
    # Shared embeddings.
    __a = old["token_embedder/embedding"]
    # Encoder.
    for i in range(a ):
        # Block i, layer 0 (Self Attention).
        __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" )
        __a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" )
        __a = layer_norm
        __a = k.T
        __a = o.T
        __a = q.T
        __a = v.T
        # Block i, layer 1 (MLP).
        __a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" )
        __a , __a = tax_mlp_lookup(a , a , "encoder" , a )
        __a = layer_norm
        if split_mlp_wi:
            __a = wi[0].T
            __a = wi[1].T
        else:
            __a = wi.T
        __a = wo.T
    __a = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    __a = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(a ):
            # Block i, layer 0 (Self Attention).
            __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" )
            __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" )
            __a = layer_norm
            __a = k.T
            __a = o.T
            __a = q.T
            __a = v.T
            # Block i, layer 1 (Cross Attention).
            __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" )
            __a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" )
            __a = layer_norm
            __a = k.T
            __a = o.T
            __a = q.T
            __a = v.T
            # Block i, layer 2 (MLP).
            __a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" )
            __a , __a = tax_mlp_lookup(a , a , "decoder" , a )
            __a = layer_norm
            if split_mlp_wi:
                __a = wi[0].T
                __a = wi[1].T
            else:
                __a = wi.T
            __a = wo.T
        __a = old["decoder/decoder_norm/scale"]
        __a = old[
            "decoder/relpos_bias/rel_embedding"
        ].T
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            __a = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Turn converted numpy parameters into a PyTorch state dict.

    Converts every array to a tensor and fills in the embedding/lm_head
    entries that T5X stores only once as the shared embedding.

    Args:
        converted_params: mapping of PyTorch parameter name -> numpy array;
            must contain "shared.weight".
        is_encoder_only: when True, decoder and lm_head entries are skipped.

    Fixes vs. original: duplicate ``a`` parameter names (a SyntaxError), a
    def name that did not match the ``make_state_dict`` call site, and
    assignment targets that had been mangled away (the state-dict keys are
    restored per the upstream conversion script).
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing: the encoder token embedding is always tied to the
    # shared embedding matrix.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Load a T5X checkpoint into ``model`` in place.

    Fixes vs. original: the four parameters were all named ``a`` (a
    SyntaxError), every argument slot was mangled, and the def name did not
    match the ``load_tax_weights_in_ta`` call site.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    # strict=True: every converted parameter must map onto the model.
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Build a T5 model from ``config_file``, load T5X weights, and save it.

    Fixes vs. original: duplicate ``a`` parameter names (a SyntaxError), a
    def name that did not match the ``convert_tax_checkpoint_to_pytorch``
    call site, and mangled argument wiring throughout the body.
    """
    config = TaConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config )
    else:
        model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
    # Fixes vs. original: `parse_args` exposes the attribute
    # `t5x_checkpoint_path` (from --t5x_checkpoint_path), but the call below
    # read the nonexistent `args.tax_checkpoint_path` -> AttributeError.
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 261 | 1 |
'''simple docstring'''
def dfs(u, graph, visited_edge, path=None):
    """Walk unused edges depth-first from ``u`` and return the vertex path.

    Marks each traversed undirected edge in both directions so it is used at
    most once (Hierholzer-style traversal for Euler paths/circuits).

    Fixes vs. original: the mangled body assigned every value to a throwaway
    name (so edges were never marked and the path never grew) and recursed
    via the undefined name ``dfs`` while the def was named ``__a`` — the def
    is renamed to match its call sites.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # Consume the undirected edge in both directions.
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Classify ``graph`` by its odd-degree vertex count.

    Returns:
        (1, odd_node) — Euler circuit exists (no odd-degree vertices);
        (2, odd_node) — Euler path exists (exactly two, ``odd_node`` is one);
        (3, odd_node) — neither.

    Fixes vs. original: both counters were mangled into one throwaway name,
    so ``odd_degree_nodes += 1`` raised NameError; the def was also named
    ``__a`` while its caller uses ``check_circuit_or_path``.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    """Print whether ``graph`` has an Euler cycle/path and, if so, print one.

    Fixes vs. original: results of ``check_circuit_or_path`` were bound to a
    throwaway name so ``check``/``odd_node`` were undefined, the start node
    was never tracked, and the def was named ``__a`` while callers use
    ``check_euler``.
    """
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("""graph is not Eulerian""" )
        print("""no path""" )
        return
    start_node = 1
    if check == 2:
        # An Euler path must start at one of the two odd-degree vertices.
        start_node = odd_node
        print("""graph has a Euler path""" )
    if check == 1:
        print("""graph has a Euler cycle""" )
    path = dfs(start_node, graph, visited_edge)
    print(path )
def main():
    """Run ``check_euler`` on five sample graphs.

    Fixes vs. original: the five graphs and the node bound were all mangled
    into one rebound name (so only the last value survived) and the def was
    named ``__a`` while the ``__main__`` guard calls ``main``.
    """
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 370 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCAmelCase ( unittest.TestCase ):
    """Checks that an accelerate-prepared optimizer survives a pickle round-trip.

    NOTE(review): the method name ``A`` does not start with ``test``, so the
    default unittest loader will not discover it — confirm the intended name
    against the upstream accelerate test module.
    """

    def A (self : Optional[Any] ):
        # Fixes vs. original: every local was bound to the same throwaway name
        # and `prepare`/`pickle` were called on the undefined `_lowerCAmelCase`.
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
        # Reset global accelerate state so later tests start clean.
        AcceleratorState._reset_state()
| 337 | 0 |
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCamelCase = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark-backed datasets.

    Fixes vs. original: the class was named ``snake_case_`` (colliding with
    the other classes in this module, and leaving the builder's ``SparkConfig``
    reference undefined) and its only field was mangled to ``__A`` although
    the builder reads ``self.config.features``.
    """

    # Optional explicit schema; inferred from the DataFrame when None.
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int], ):
    """Return a generator function yielding (example_id, row_dict) pairs.

    Rows are read partition by partition in ``partition_order``; ids have the
    form ``"{partition_id}_{row_id}"``.

    Fixes vs. original: both parameters were named ``_lowerCamelCase`` (a
    SyntaxError) while the body read ``df``/``partition_order``, and the def
    name did not match its ``_generate_iterable_examples`` caller.
    """
    import pyspark

    def generate_fn():
        # Tag every row with its physical partition id so we can filter per partition.
        df_with_partition_id = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f'''part_id = {partition_id}''').drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable that reads a Spark DataFrame partition by partition.

    Fixes vs. original: the class was named ``snake_case_`` (its own methods
    reference ``SparkExamplesIterable``), ``__init__`` declared two parameters
    both called ``lowercase_`` (a SyntaxError) while the body read ``df``,
    the shuffle method called the undefined ``generator``, and the shuffle /
    shard methods collided under the single mangled name ``__UpperCamelCase``
    (restored to the base-class contract names — confirm against upstream
    ``datasets.packaged_modules.spark``).
    """

    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None, ):
        self.df = df
        # Default to the natural partition order of the underlying RDD.
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        """Return a copy iterating partitions in an order shuffled by ``generator``."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        """Return a copy restricted to the partitions assigned to ``worker_id``."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        # One shard per partition in the current iteration order.
        return len(self.partition_order)
class snake_case_ ( datasets.DatasetBuilder ):
    """Dataset builder that materializes a Spark DataFrame into Arrow/Parquet shards.

    NOTE(review): this block has been through an identifier-mangling pass.
    Locals are assigned to names like ``lowercase__`` while later lines read
    the intended names (``df``, ``working_dir``, ``probe``, ``stats`` ...), so
    most methods raise NameError at runtime.  In addition, several distinct
    methods all share the name ``__UpperCamelCase``, so earlier definitions
    are silently shadowed by later ones.  The comments below document the
    apparent intent of each method — confirm against the upstream source
    before relying on any of it.
    """

    # Builder config class; exposes the optional `features` schema.
    __A : Any = SparkConfig
    def __init__( self : List[Any] , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : List[str] , ) -> Union[str, Any]:
        import pyspark
        # Intended: cache the active SparkSession, the source DataFrame and an
        # optional scratch directory for shard files (assignments below bind
        # mangled locals instead of self attributes — see class NOTE).
        lowercase__ : List[str] = pyspark.sql.SparkSession.builder.getOrCreate()
        lowercase__ : Optional[int] = df
        lowercase__ : int = working_dir
        # The config name is derived from the DataFrame's semantic hash so the
        # cache is keyed by the logical query plan.
        super().__init__(
            cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
    # Intended: _validate_cache_dir — ensure the cache dir is on storage that
    # every Spark worker can see (required on multi-node clusters).
    def __UpperCamelCase ( self : str ) -> Optional[Any]:
        # Returns the path of the created file.
        # NOTE(review): `uuid.uuida` below looks mangled from `uuid.uuid4` — confirm.
        def create_cache_and_write_probe(lowercase_ : Optional[Any] ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=lowercase_ )
            lowercase__ : Union[str, Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(lowercase_ , "a" )
            return [probe_file]
        # A local master runs driver and workers on one machine: nothing to check.
        if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            # Run the probe on one worker, then verify the file is visible here.
            lowercase__ : Dict = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
    # Intended: _info — dataset metadata from the configured features schema.
    def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
        return datasets.DatasetInfo(features=self.config.features )
    # Intended: _split_generators — a Spark DataFrame yields a single TRAIN split.
    def __UpperCamelCase ( self : Optional[int] , lowercase_ : datasets.download.download_manager.DownloadManager ) -> Tuple:
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    # Intended: _repartition_df_if_needed — estimate the Arrow size of the
    # DataFrame and repartition so each partition fits in one max-size shard.
    def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[Any] ) -> str:
        import pyspark
        # Emits one single-column record batch holding the byte size of each
        # incoming Arrow batch; used to sample the per-row footprint.
        def get_arrow_batch_size(lowercase_ : List[str] ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
        lowercase__ : Tuple = self.df.count()
        lowercase__ : Optional[Any] = df_num_rows if df_num_rows <= 1_00 else 1_00
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        lowercase__ : Optional[Any] = (
            self.df.limit(lowercase_ )
            .repartition(1 )
            .mapInArrow(lowercase_ , "batch_bytes: long" )
            .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        lowercase__ : Optional[Any] = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            lowercase__ : int = min(lowercase_ , int(approx_total_size / max_shard_size ) )
            lowercase__ : Union[str, Any] = self.df.repartition(lowercase_ )
    # Intended: _prepare_split_single — write shards on the workers and yield
    # per-task (num_examples, num_bytes, num_shards, shard_lengths) stats.
    def __UpperCamelCase ( self : List[str] , lowercase_ : str , lowercase_ : str , lowercase_ : int , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark
        lowercase__ : List[str] = ParquetWriter if file_format == "parquet" else ArrowWriter
        # Write to the scratch dir when one was given, else directly to fpath.
        lowercase__ : str = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
        lowercase__ : Optional[Any] = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        lowercase__ : Union[str, Any] = self.config.features
        lowercase__ : Any = self._writer_batch_size
        lowercase__ : Dict = self._fs.storage_options
        # Runs on each worker: consumes a partition's Arrow batches, writes
        # them into size-capped shard files, and yields one stats record per
        # finalized shard (task_id, num_examples, num_bytes).
        def write_arrow(lowercase_ : List[str] ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            lowercase__ : str = pyspark.TaskContext().taskAttemptId()
            lowercase__ : str = next(lowercase_ , lowercase_ )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
            lowercase__ : Any = 0
            lowercase__ : Optional[int] = writer_class(
                features=lowercase_ , path=working_fpath.replace("SSSSS" , F'''{shard_id:05d}''' ).replace("TTTTT" , F'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
            lowercase__ : str = pa.Table.from_batches([first_batch] )
            writer.write_table(lowercase_ )
            for batch in it:
                # Roll over to a new shard once the current one reaches max_shard_size.
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    lowercase__ , lowercase__ : Tuple = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
                    shard_id += 1
                    lowercase__ : Dict = writer_class(
                        features=writer._features , path=working_fpath.replace("SSSSS" , F'''{shard_id:05d}''' ).replace("TTTTT" , F'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
                lowercase__ : List[str] = pa.Table.from_batches([batch] )
                writer.write_table(lowercase_ )
            # Flush the final, partially-filled shard (if it holds any data).
            if writer._num_bytes > 0:
                lowercase__ , lowercase__ : Dict = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
            # Move shards from the scratch dir to their final destination.
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(lowercase_ ) ):
                    lowercase__ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
                    shutil.move(lowercase_ , lowercase_ )
        # Aggregate the per-shard stats records by task on the cluster.
        lowercase__ : int = (
            self.df.mapInArrow(lowercase_ , "task_id: long, num_examples: long, num_bytes: long" )
            .groupBy("task_id" )
            .agg(
                pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    # Intended: _prepare_split — drive the distributed write, accumulate
    # totals, then rename shards into the -TTTTT-SSSSS-of-NNNNN pattern.
    def __UpperCamelCase ( self : Optional[int] , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : int , ) -> Optional[Any]:
        self._validate_cache_dir()
        lowercase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(lowercase_ )
        lowercase__ : Tuple = not is_remote_filesystem(self._fs )
        lowercase__ : Optional[Any] = os.path.join if is_local else posixpath.join
        lowercase__ : List[Any] = "-TTTTT-SSSSS-of-NNNNN"
        lowercase__ : Any = F'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        lowercase__ : str = path_join(self._output_dir , lowercase_ )
        lowercase__ : Any = 0
        lowercase__ : List[Any] = 0
        lowercase__ : Dict = 0
        lowercase__ : List[str] = []
        lowercase__ : List[Any] = []
        for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
            (
                (
                    lowercase__
                ) , (
                    lowercase__
                ) , (
                    lowercase__
                ) , (
                    lowercase__
                ) ,
            ) : Optional[int] = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(lowercase_ )
        lowercase__ : List[Any] = total_num_examples
        lowercase__ : List[str] = total_num_bytes
        # should rename everything at the end
        logger.debug(F'''Renaming {total_shards} shards.''' )
        if total_shards > 1:
            lowercase__ : Tuple = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            lowercase__ : Any = self._fs
            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
                rename(
                    lowercase_ , fpath.replace("SSSSS" , F'''{shard_id:05d}''' ).replace("TTTTT" , F'''{task_id:05d}''' ) , fpath.replace("TTTTT-SSSSS" , F'''{global_shard_id:05d}''' ).replace("NNNNN" , F'''{total_shards:05d}''' ) , )
            # Build (task_id, shard_id, global_shard_id) triples, then rename
            # all shards in parallel on the cluster.
            lowercase__ : Optional[Any] = []
            lowercase__ : int = 0
            for i in range(len(lowercase_ ) ):
                lowercase__ , lowercase__ : Optional[int] = task_id_and_num_shards[i]
                for shard_id in range(lowercase_ ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
        else:
            # don't use any pattern
            lowercase__ : str = 0
            lowercase__ : str = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS" , F'''{shard_id:05d}''' ).replace("TTTTT" , F'''{task_id:05d}''' ) , fpath.replace(lowercase_ , "" ) , )
    # Intended: _get_examples_iterable_for_split — streaming access to the split.
    def __UpperCamelCase ( self : Optional[int] , lowercase_ : "datasets.SplitGenerator" , ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df )
| 87 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger. Must be bound as `logger`: main() logs through that name.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify
    them on the command line.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        """Assemble the `data_files` mapping consumed by `load_dataset` from the folder arguments."""
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        # None (rather than {}) tells load_dataset to fall back to the hub dataset.
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    # MAE-specific knobs, copied into the model config by main().
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """TrainingArguments extended with a base learning rate that main() scales by the total batch size."""

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    """Stack the per-example ``pixel_values`` tensors into one batch tensor for the Trainer.

    Args:
        examples: list of dicts, each holding a ``"pixel_values"`` tensor of identical shape.

    Returns:
        dict with a single ``"pixel_values"`` tensor of shape ``(batch, *example_shape)``.
    """
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    """Pre-train a ViT-MAE model.

    Arguments come either from the command line or, when the single CLI
    argument is a ``.json`` path, from that JSON file.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    # Decide which dataset column holds the images.
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the MAE train-time augmentations to every image in the batch."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for `xla_spawn` (TPUs); the spawn index is required by the API but unused."""
    main()
# Standard script entry point.
if __name__ == "__main__":
    main()
| 321 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    """Unit tests for the summarization data utilities (truncate/pad, story parsing, masks)."""

    def setUp(self):
        # Shared fixture: every truncate/pad test uses a block size of 10.
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Short sequences are right-padded with the pad token up to block_size."""
        sequence = [1, 2, 3, 4]
        expected = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Sequences of exactly block_size are returned unchanged."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected)

    def test_fit_to_block_sequence_too_big(self):
        """Sequences longer than block_size are truncated."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected)

    def test_process_story_no_highlights(self):
        """A story without any `@highlight` yields no summary lines."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields neither story nor summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        """Sentences lacking a final period get one; `@highlight` text becomes the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        # Pad token 23: positions holding it are masked out (0).
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        # The separator token (101) flips the segment id for each subsequent span.
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 351 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

# Module-level logger. Must be bound as `logger`: the summarization/translation
# pipelines below log through that name in their check_inputs methods.
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    """Output format selector for the text2text pipelines below.

    The class must be named ``ReturnType``: the pipeline code references
    ``ReturnType.TENSORS`` and ``ReturnType.TEXT``.
    """

    # Return raw token-id tensors under the "<return_name>_token_ids" key.
    TENSORS = 0
    # Return decoded strings under the "<return_name>_text" key.
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """Pipeline for text-to-text generation using a seq2seq model.

    Input strings (optionally batched as a list) are tokenized, passed through
    ``model.generate``, and decoded. Output records are keyed by
    ``f"{return_name}_text"`` or ``f"{return_name}_token_ids"`` depending on
    the requested :class:`ReturnType`.
    """

    # Used to build the output keys; subclasses override this.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict usage to seq2seq (encoder-decoder) model classes.
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Split user kwargs into the preprocess / forward / postprocess parameter dicts."""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        # Everything unrecognized is forwarded to model.generate().
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Hook for subclasses to sanity-check generation lengths; no check by default."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        """Prefix the input(s) with the model's configured prefix and tokenize them."""
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # Flatten singleton results when the caller passed a batch of plain strings.
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        # Reshape to (batch, num_return_sequences, seq_len) so postprocess can
        # iterate per input.
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
# NOTE(review): the base class `_UpperCamelCase` is undefined in this file —
# it is presumably the text2text generation pipeline defined above; confirm
# and point the base at it.
class SummarizationPipeline(_UpperCamelCase):
    """Summarization pipeline: text2text generation with summary-specific length warnings."""

    # Output keys become "summary_text" / "summary_token_ids".
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Warn about length settings that rarely make sense for summarization."""
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
# NOTE(review): the base class `_UpperCamelCase` is undefined in this file —
# it is presumably the text2text generation pipeline defined above; confirm
# and point the base at it.
class TranslationPipeline(_UpperCamelCase):
    """Translation pipeline: text2text generation with src/tgt language handling."""

    # Output keys become "translation_text" / "translation_token_ids".
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Warn when the input is close to max_length — translations are usually about input-sized."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        # Tokenizers with multilingual support build the inputs themselves.
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 107 | 0 |
"""simple docstring"""
import sys
from collections import defaultdict
class __A :
    '''Array-backed min-heap with a vertex -> heap-index map, used by Prim's algorithm.

    NOTE(review): local bindings were machine-mangled — every assignment targets
    ``snake_case_`` while later statements read the intended names (e.g.
    ``self.node_position``, ``smallest_child``, ``temp``); restore the original
    names before running.
    '''
    def __init__( self : List[str] ) ->Optional[int]:
        """Initialise the vertex -> heap-index lookup table (intended: self.node_position)."""
        snake_case_ = []
    def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : int ) ->List[Any]:
        """Return the stored heap position of ``vertex`` (get_position)."""
        return self.node_position[vertex]
    def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ) ->Union[str, Any]:
        """Record ``pos`` as the heap position of a vertex (set_position)."""
        snake_case_ = pos
    def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]:
        """Sift the element at ``start`` down until the min-heap property holds (top_to_bottom)."""
        # Leaf nodes (index beyond size // 2 - 1) cannot be sifted further.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                snake_case_ = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    snake_case_ = 2 * start + 1
                else:
                    snake_case_ = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap heap entries and their tracked positions, then recurse.
            snake_case_ , snake_case_ = heap[smallest_child], positions[smallest_child]
            snake_case_ , snake_case_ = (
                heap[start],
                positions[start],
            )
            snake_case_ , snake_case_ = temp, tempa
            snake_case_ = self.get_position(positions[smallest_child] )
            self.set_position(
                positions[smallest_child] , self.get_position(positions[start] ) )
            self.set_position(positions[start] , UpperCAmelCase_ )
            self.top_to_bottom(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    def lowerCAmelCase ( self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] ) ->str:
        """Sift ``val`` at ``index`` up toward the root after a decrease-key (bottom_to_top)."""
        snake_case_ = position[index]
        while index != 0:
            # Parent index for a 1-rooted-at-0 binary heap layout.
            snake_case_ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                snake_case_ = heap[parent]
                snake_case_ = position[parent]
                self.set_position(position[parent] , UpperCAmelCase_ )
            else:
                snake_case_ = val
                snake_case_ = temp
                self.set_position(UpperCAmelCase_ , UpperCAmelCase_ )
                break
            snake_case_ = parent
        else:
            # Reached the root without breaking: place the value at index 0.
            snake_case_ = val
            snake_case_ = temp
            self.set_position(UpperCAmelCase_ , 0 )
    def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) ->int:
        """Build a heap in place by sifting down every internal node (heapify)."""
        snake_case_ = len(UpperCAmelCase_ ) // 2 - 1
        for i in range(UpperCAmelCase_ , -1 , -1 ):
            self.top_to_bottom(UpperCAmelCase_ , UpperCAmelCase_ , len(UpperCAmelCase_ ) , UpperCAmelCase_ )
    def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]:
        """Pop and return the minimum element, restoring the heap (delete_minimum)."""
        snake_case_ = positions[0]
        snake_case_ = sys.maxsize
        self.top_to_bottom(UpperCAmelCase_ , 0 , len(UpperCAmelCase_ ) , UpperCAmelCase_ )
        return temp
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
    """Compute a minimum spanning tree of ``adjacency_list`` with Prim's algorithm.

    Returns the MST as a list of ``(parent_vertex, vertex)`` edges.

    NOTE(review): bindings were machine-mangled — every assignment targets
    ``snake_case_`` / the parameter is ``_SCREAMING_SNAKE_CASE`` while reads use
    the intended names (``heap``, ``visited``, ``distance_tv``, ``positions``,
    ``adjacency_list``); restore them before running.
    """
    snake_case_ = Heap()
    snake_case_ = [0] * len(_SCREAMING_SNAKE_CASE )
    snake_case_ = [-1] * len(_SCREAMING_SNAKE_CASE ) # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    snake_case_ = [] # Heap of Distance of vertices from their neighboring vertex
    snake_case_ = []
    for vertex in range(len(_SCREAMING_SNAKE_CASE ) ):
        distance_tv.append(sys.maxsize )
        positions.append(_SCREAMING_SNAKE_CASE )
        heap.node_position.append(_SCREAMING_SNAKE_CASE )
    snake_case_ = []
    snake_case_ = 1
    # Seed the frontier with vertex 0's neighbours, then heapify distances.
    snake_case_ = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        snake_case_ = 0
        snake_case_ = distance
    heap.heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    for _ in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
        snake_case_ = heap.delete_minimum(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            snake_case_ = 1
            # Relax every edge out of the newly added vertex (decrease-key).
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(_SCREAMING_SNAKE_CASE )]
                ):
                    snake_case_ = distance
                    heap.bottom_to_top(
                        _SCREAMING_SNAKE_CASE , heap.get_position(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    snake_case_ = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Read an undirected weighted edge list from stdin and print the MST edges.
    # NOTE(review): names were mangled — the loop reads `edges_number`/`edge`
    # and calls `prisms_algorithm`, none of which are defined above.
    __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip())
    __SCREAMING_SNAKE_CASE : Optional[Any] = defaultdict(list)
    for _ in range(edges_number):
        __SCREAMING_SNAKE_CASE : Dict = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 347 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10)
def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class __A :
    '''Dataclass with one required field of each basic type (int/float/str/bool).

    NOTE(review): field names were mangled to ``__lowercase`` — with duplicate
    annotations only the last survives as a dataclass field; restore the
    original names (foo/bar/baz/flag).
    '''
    __lowercase: int
    __lowercase: float
    __lowercase: str
    __lowercase: bool
@dataclass
class __A :
    '''Dataclass with defaulted fields, one carrying argparse help metadata.

    NOTE(review): both fields were mangled to ``__lowercase`` — only the last
    annotation survives; restore the original names.
    '''
    __lowercase: int = 42
    __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class __A :
    '''Dataclass exercising boolean defaults: False, True and Optional[bool]=None.

    NOTE(review): all three fields were mangled to ``__lowercase`` — only the
    last annotation survives; restore the original names.
    '''
    __lowercase: bool = False
    __lowercase: bool = True
    __lowercase: Optional[bool] = None
class __A (snake_case__):
    '''Two-member enum ("titi", "toto"); base ``snake_case__`` is presumably Enum — TODO confirm.'''
    __lowercase: str = """titi"""
    __lowercase: Any = """toto"""
class __A (snake_case__):
    '''Mixed-type enum with string and int members ("titi", "toto", 42);
    base ``snake_case__`` is presumably Enum — TODO confirm.'''
    __lowercase: int = """titi"""
    __lowercase: Optional[Any] = """toto"""
    __lowercase: List[Any] = 42
@dataclass
class __A :
    '''Dataclass with an enum-typed field defaulting to "toto".'''
    __lowercase: BasicEnum = "toto"
    def lowerCAmelCase ( self : int ) ->List[Any]:
        """Post-init style hook: coerce the string value into a BasicEnum member."""
        snake_case_ = BasicEnum(self.foo )
@dataclass
class __A :
    '''Dataclass with a mixed-type enum field defaulting to "toto".'''
    __lowercase: MixedTypeEnum = "toto"
    def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
        """Post-init style hook: coerce the raw value into a MixedTypeEnum member."""
        snake_case_ = MixedTypeEnum(self.foo )
@dataclass
class __A :
    '''Dataclass of all-optional fields (None / empty-list defaults).

    NOTE(review): every field was mangled to ``__lowercase`` — only the last
    annotation survives; restore the original names.
    '''
    __lowercase: Optional[int] = None
    __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""})
    __lowercase: Optional[str] = None
    __lowercase: Optional[List[str]] = list_field(default=[])
    __lowercase: Optional[List[int]] = list_field(default=[])
@dataclass
class __A :
    '''Dataclass of list-valued fields with empty and non-empty defaults.

    NOTE(review): every field was mangled to ``__lowercase`` — only the last
    annotation survives; restore the original names.
    '''
    __lowercase: List[int] = list_field(default=[])
    __lowercase: List[int] = list_field(default=[1, 2, 3])
    __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
    __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class __A :
    '''Dataclass whose fields have no defaults, i.e. are required arguments.'''
    __lowercase: List[int] = field()
    __lowercase: str = field()
    __lowercase: BasicEnum = field()
    def lowerCAmelCase ( self : Any ) ->str:
        """Post-init style hook: coerce the required enum value into a BasicEnum member."""
        snake_case_ = BasicEnum(self.required_enum )
@dataclass
class __A :
    '''Dataclass whose annotations are string literals (forward references),
    verifying the parser resolves them like real types.

    NOTE(review): field names were mangled to ``__lowercase`` — restore the
    original names.
    '''
    __lowercase: int
    __lowercase: "BasicEnum" = field()
    __lowercase: "Optional[bool]" = None
    __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""})
    __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
# PEP 604 `X | None` syntax only parses on Python >= 3.10, so these mirrors of
# the Optional-based dataclasses are defined conditionally.
if is_python_no_less_than_3_10:
    @dataclass
    class __A :
        '''3.10+ mirror of the boolean-defaults dataclass using `bool | None`.'''
        __lowercase: bool = False
        __lowercase: bool = True
        __lowercase: bool | None = None
    @dataclass
    class __A :
        '''3.10+ mirror of the all-optional dataclass using `T | None` unions.'''
        __lowercase: int | None = None
        __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""})
        __lowercase: str | None = None
        __lowercase: list[str] | None = list_field(default=[])
        __lowercase: list[int] | None = list_field(default=[])
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]:
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""}
snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) )
del xx["type"], yy["type"]
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ )
self.assertFalse(example.flag )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" )
expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase_ )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
@dataclass
class __A :
'''simple docstring'''
__lowercase: Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(
UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
expected.add_argument("""--bar""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" )
expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase_ )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) )
snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" )
os.mkdir(UpperCAmelCase_ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" )
os.mkdir(UpperCAmelCase_ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
| 347 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=_SCREAMING_SNAKE_CASE ):
    """Import-time placeholder: raises a helpful error when torch/scipy are missing."""
    SCREAMING_SNAKE_CASE = ["torch", "scipy"]
    def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
        """Fail fast with an informative error naming the missing backends."""
        requires_backends(self , ["torch", "scipy"] )
    @classmethod
    def lowerCamelCase_ ( cls : List[str] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
        """Same guard for classmethod-style construction (e.g. from_config)."""
        requires_backends(cls , ["torch", "scipy"] )
    @classmethod
    def lowerCamelCase_ ( cls : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
        # Duplicate method name mirrors the generated dummy-object layout
        # (intended: a second classmethod entry point such as from_pretrained).
        """Same guard for the second classmethod entry point."""
        requires_backends(cls , ["torch", "scipy"] )
| 371 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
    """Configuration holder for FNet models (vocab/hidden sizes, layer counts, etc.).

    NOTE(review): the ``__init__`` parameters were mangled — every argument is
    named ``UpperCAmelCase_`` (duplicates, a SyntaxError) while the body reads
    names such as ``vocab_size``/``hidden_size``; restore the real signature.
    """
    # Model-type identifier used by the auto classes.
    SCREAMING_SNAKE_CASE = '''fnet'''
    def __init__( self : Tuple , UpperCAmelCase_ : str=32_000 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : str=3_072 , UpperCAmelCase_ : List[str]="gelu_new" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : List[Any]=1e-12 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : List[Any]=2 , **UpperCAmelCase_ : Tuple , ):
        """Store the hyper-parameters; special token ids are forwarded to the base config."""
        super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
        __UpperCAmelCase : Optional[Any] = vocab_size
        __UpperCAmelCase : List[str] = max_position_embeddings
        __UpperCAmelCase : List[Any] = hidden_size
        __UpperCAmelCase : Optional[int] = num_hidden_layers
        __UpperCAmelCase : Optional[int] = intermediate_size
        __UpperCAmelCase : List[str] = hidden_act
        __UpperCAmelCase : List[str] = hidden_dropout_prob
        __UpperCAmelCase : Tuple = initializer_range
        __UpperCAmelCase : Union[str, Any] = type_vocab_size
        __UpperCAmelCase : List[Any] = layer_norm_eps
        # TPU-specific Fourier-transform options (FNet replaces attention with FFTs).
        __UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
        __UpperCAmelCase : List[Any] = tpu_short_seq_length
| 37 | 0 |
"""simple docstring"""
from collections.abc import Generator
def _A ( ):
"""simple docstring"""
a , a =0, 1
while True:
a , a =b, a + b
yield b
def _A ( lowercase = 10_00 ):
"""simple docstring"""
a =1
a =fibonacci_generator()
while len(str(next(lowercase ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 81 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ : Any = logging.get_logger(__name__)
lowerCamelCase_ : Tuple = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCamelCase_ : str = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
lowerCamelCase_ : Optional[int] = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
lowerCamelCase_ : Tuple = """▁"""
class __A ( _SCREAMING_SNAKE_CASE ):
    """SentencePiece (BPE) tokenizer for BARThez models.

    NOTE(review): local bindings were machine-mangled — assignments target
    ``a`` while later statements read names such as ``self.sp_model`` and
    ``self.fairseq_tokens_to_ids``; restore them before running.
    """
    __lowerCAmelCase = VOCAB_FILES_NAMES
    __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase = ["input_ids", "attention_mask"]
    def __init__( self , __A , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A = None , **__A , ) -> None:
        """Load the SentencePiece model and set up the fairseq special-token tables."""
        # Mask token behave like a normal word, i.e. include the space before it
        a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
        a ={} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
        a =vocab_file
        a =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__A ) )
        # Fairseq reserves the first ids for special tokens; <mask> goes last.
        a ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        a =len(self.sp_model ) - 1
        a ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
        """Add special tokens: <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair.

        NOTE(review): both token-id parameters were mangled to the same name,
        so the None-branch as written would concatenate None.
        """
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        a =[self.cls_token_id]
        a =[self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = False ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
        if token_ids_a is None:
            return [1] + ([0] * len(__A )) + [1]
        return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
    def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
        """Token-type ids: BARThez uses all zeros, even for sequence pairs."""
        a =[self.sep_token_id]
        a =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def SCREAMING_SNAKE_CASE ( self ) -> Any:
        """Vocabulary size, taken from the SentencePiece model."""
        return len(self.sp_model )
    def SCREAMING_SNAKE_CASE ( self ) -> int:
        """Return the full token -> id mapping, including added tokens."""
        a ={self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def SCREAMING_SNAKE_CASE ( self , __A ) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(__A , out_type=__A )
    def SCREAMING_SNAKE_CASE ( self , __A ) -> Dict:
        """Convert a token to its id, honouring the fairseq special-token table first."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        a =self.sp_model.PieceToId(__A )
        # SentencePiece returns 0 for unknown pieces; map that to unk_token_id.
        return spm_id if spm_id else self.unk_token_id
    def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]:
        """Convert an id back to its token, honouring the fairseq table first."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(__A )
    def SCREAMING_SNAKE_CASE ( self , __A ) -> Tuple:
        """Join tokens back into a string, decoding around special tokens."""
        a =[]
        a =''''''
        a =False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__A ) + token
                a =True
                a =[]
            else:
                current_sub_tokens.append(__A )
                a =False
        out_string += self.sp_model.decode(__A )
        return out_string.strip()
    def __getstate__( self ) -> Tuple:
        """Drop the unpicklable SentencePiece processor before pickling."""
        a =self.__dict__.copy()
        a =None
        return state
    def __setstate__( self , __A ) -> Tuple:
        """Restore state and reload the SentencePiece model from the vocab file."""
        a =d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            a ={}
        a =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(__A ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        a =os.path.join(
            __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __A )
        elif not os.path.isfile(self.vocab_file ):
            with open(__A , '''wb''' ) as fi:
                a =self.sp_model.serialized_model_proto()
                fi.write(__A )
        # NOTE(review): the trailing "| 81 | 1 |" below is paste residue fused
        # onto the line, not code.
        return (out_vocab_file,) | 81 | 1 |
def UpperCamelCase ( set_a , set_b , alternative_union=False ):
    '''Jaccard similarity of two collections.

    For sets: |A ∩ B| / |A ∪ B|. For lists/tuples the same ratio is computed
    order-preservingly (duplicates in ``set_a`` count). With
    ``alternative_union=True`` the denominator is |A| + |B| instead of the
    true union. Returns None for unsupported input types.

    Fix: the mangled original declared three parameters all named
    ``lowerCAmelCase__`` (a SyntaxError) and every read was garbled; the
    bindings are restored from the visible structure.

    >>> UpperCamelCase({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"})
    0.375
    '''
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            # Order-preserving union: set_a followed by set_b's new elements.
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    # Demo: Jaccard similarity of these two sets is 3/8 = 0.375.
    # NOTE(review): names were mangled — `jaccard_similarity`, `set_a` and
    # `set_b` are not defined above (the function is named `UpperCamelCase`
    # and the constants `lowercase__`).
    lowercase__ :Tuple = {"a", "b", "c", "d", "e"}
    lowercase__ :List[Any] = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 97 |
lowercase__ :List[str] = 6_5521
def UpperCamelCase ( plain_text ):
    '''Compute the Adler-32 checksum of ``plain_text``.

    Fix: the mangled original took a parameter named ``lowerCAmelCase__`` but
    iterated an undefined ``plain_text`` and referenced an undefined
    ``MOD_ADLER``; the parameter name is restored and the modulus inlined so
    the function is self-contained.

    >>> UpperCamelCase("Wikipedia")
    300286872
    '''
    MOD_ADLER = 6_5521  # largest prime below 2**16 (RFC 1950)
    a = 1  # running sum of character values, seeded at 1
    b = 0  # running sum of the partial a-sums
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 97 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Test suite for the mBARThez (moussaKam/mbarthez) slow and fast tokenizers,
    run on top of the shared tokenizer test mixin.

    NOTE(review): identifiers such as `__UpperCamelCase` and
    `__SCREAMING_SNAKE_CASE` are name-mangled inside this class body (to
    `_lowercase___UpperCamelCase` etc.) and do not appear to be defined
    anywhere in this file — verify these references actually resolve.
    """

    # Slow / fast tokenizer classes under test, plus mixin feature flags.
    A__ : Dict = BarthezTokenizer
    A__ : List[Any] = BarthezTokenizerFast
    A__ : int = True
    A__ : str = True

    def lowerCamelCase_ ( self ):
        """Download the pretrained fast tokenizer and save it to the test
        tmpdir in both the default and the legacy serialization formats."""
        super().setUp()
        UpperCamelCase_ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
        # NOTE(review): the result above is bound to `UpperCamelCase_` but the
        # lines below read `tokenizer` — confirm the intended variable name.
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=__UpperCamelCase )
        UpperCamelCase_ = tokenizer

    def lowerCamelCase_ ( self ):
        """The `<pad>` token should convert to id 1 and back."""
        UpperCamelCase_ = """<pad>"""
        UpperCamelCase_ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )

    def lowerCamelCase_ ( self ):
        """Spot-check the vocabulary: special tokens at the expected positions
        and an expected total size of 101122 entries."""
        UpperCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(__UpperCamelCase ) , 1_0_1_1_2_2 )

    def lowerCamelCase_ ( self ):
        """The reported `vocab_size` must match the vocabulary length above."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 )

    @require_torch
    def lowerCamelCase_ ( self ):
        """Tokenize a two-sentence batch to PyTorch tensors and compare the
        tensor shapes and the first row of input ids with frozen values."""
        UpperCamelCase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        UpperCamelCase_ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
        UpperCamelCase_ = self.tokenizer(
            __UpperCamelCase , max_length=len(__UpperCamelCase ) , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" )
        self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        UpperCamelCase_ = batch.input_ids.tolist()[0]
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )

    def lowerCamelCase_ ( self ):
        """Slow and fast tokenizers must agree on tokenization and encoding of
        a sentence containing digits and an accented character."""
        if not self.test_rust_tokenizer:
            return
        UpperCamelCase_ = self.get_tokenizer()
        UpperCamelCase_ = self.get_rust_tokenizer()
        UpperCamelCase_ = """I was born in 92000, and this is falsé."""
        UpperCamelCase_ = tokenizer.tokenize(__UpperCamelCase )
        UpperCamelCase_ = rust_tokenizer.tokenize(__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
        UpperCamelCase_ = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
        UpperCamelCase_ = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
        UpperCamelCase_ = self.get_rust_tokenizer()
        UpperCamelCase_ = tokenizer.encode(__UpperCamelCase )
        UpperCamelCase_ = rust_tokenizer.encode(__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )

    @slow
    def lowerCamelCase_ ( self ):
        """Integration test: encode two French sentences with the hosted
        moussaKam/mbarthez checkpoint (pinned revision) and compare the
        result against a frozen expected encoding."""
        UpperCamelCase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        UpperCamelCase_ = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCamelCase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__UpperCamelCase , )
| 122 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQ model's encode step.

    Renamed from the unreachable duplicate `lowercase_` so that the
    `VQEncoderOutput(latents=...)` construction in the model's `encode`
    method resolves; `BaseOutput` is the base imported from `..utils`.
    """

    # Encoder output before quantization — presumably shaped
    # (batch, latent_channels, h, w); confirm against the Encoder.
    latents: torch.FloatTensor
class lowercase_(ModelMixin, ConfigMixin):
    """A VQ-VAE style autoencoder: an ``Encoder``, a ``VectorQuantizer`` and a
    ``Decoder``.

    Fixes versus the previous revision: the duplicated ``__UpperCamelCase``
    parameter names (a SyntaxError) are restored to real names, the submodules
    are assigned to ``self`` (they were bound to throwaway locals), the
    nonexistent ``nn.Convad`` is corrected to ``nn.Conv2d``, and the base
    classes are the imported ``ModelMixin``/``ConfigMixin``.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (6_4,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 3_2,
        num_vq_embeddings: int = 2_5_6,
        norm_num_groups: int = 3_2,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18_215,
        norm_type: str = "group",
    ):
        super().__init__()
        # pass init params to Encoder; double_z=False because VQ latents are
        # a single code tensor, not a mean/logvar pair.
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        # 1x1 convs map between latent space and the quantizer's embedding space.
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> "VQEncoderOutput":
        """Encode `x` into (pre-quantization) latents.

        Returns a plain tuple ``(h,)`` when ``return_dict`` is False.
        """
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """Quantize latents (unless `force_not_quantize`) and decode a sample."""
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # "spatial" norm decoders additionally condition on the quantized latents.
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """Full autoencoding pass: encode, quantize and decode `sample`."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 122 | 1 |
"""simple docstring"""
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __lowerCamelCase(SchedulerMixin, ConfigMixin):
    """A variance-preserving (VP) SDE scheduler.

    Fixes versus the previous revision: the duplicated ``a_`` constructor
    parameters (a SyntaxError) are restored to real names, the two methods
    that shared one name are given distinct names (the error message below
    grounds ``set_timesteps``), and the bases are the imported
    ``SchedulerMixin``/``ConfigMixin``.
    """

    # Solver order (number of model evaluations per step).
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=20_00, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # `register_to_config` exposes the arguments on `self.config`
        # (read back below as beta_min / beta_max / sampling_eps).
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None  # populated by set_timesteps()

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Build the continuous time grid, from 1 down to `sampling_eps`."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Take one reverse-diffusion step along the VP SDE.

        Returns the noisy sample ``x`` and its noise-free mean ``x_mean``.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        # Broadcast std over the trailing sample dimensions.
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute drift/diffusion of the reverse SDE
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 356 |
"""simple docstring"""
def __lowerCamelCase ( __UpperCamelCase ) -> bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
lowercase__ = int(input("""Enter number: """).strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 161 | 0 |
"""Dynamic-programming check whether string `a` abbreviates to string `b`."""


def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be transformed into `b` by uppercasing some of
    its lowercase letters and deleting the remaining lowercase letters.

    Fixes versus the previous revision: the duplicated parameter names
    (a SyntaxError) and the lost dp-cell assignment targets are restored;
    the function is given an importable name.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: first i chars of `a` can produce first j chars of `b`.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Uppercase a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Or delete a[i] if it is lowercase.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 47 |
"""simple docstring"""
import math
def lowercase (_lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase (_lowerCAmelCase = 0.1 ):
__lowerCAmelCase = 3
__lowerCAmelCase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_lowerCAmelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301 | 0 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
# Module-level logger; `main()` below configures and uses it as `logger`,
# so the throwaway name `a` (clobbered by the class definitions) is fixed.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and eval.

    Renamed from the duplicate class name `a` — `main()` builds the
    `HfArgumentParser` with `DataTrainingArguments` and reads every field
    below via `data_args.*`. Field annotations are restored so `dataclasses`
    actually treats them as fields.
    """

    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune from.

    Renamed from the duplicate class name `a` — `main()` builds the
    `HfArgumentParser` with `ModelArguments` and reads every field below via
    `model_args.*`. Field annotations are restored so `dataclasses` actually
    treats them as fields.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    """Fine-tune and evaluate a sequence-classification model on XNLI.

    Renamed from `__magic_name__` — the `__main__` guard below calls `main()`.
    Fixes versus the previous revision: every local was bound to the single
    name `snake_case_` and then read back under its real name (NameError);
    the real names are restored from their use sites. `training_args.fpaa`
    is corrected to `fp16`, and `idalabel`/`labelaid` to `id2label`/`label2id`.
    """
    # Parse command-line / json arguments into the three dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names
    # Labels
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
| 72 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a ( unittest.TestCase ):
    """Fast (CPU-sized) smoke tests for the ScoreSdeVePipeline.

    NOTE(review): several locals are bound to `snake_case_` but read back
    under other names (`model`, `unet`, `scheduler`, `sde_ve`, `generator`,
    `image`, ...) — verify the intended variable names before relying on
    this suite.
    """

    @property
    def A_ ( self : Tuple ):
        """Build a small, seeded UNet2DModel used as the pipeline's denoiser."""
        torch.manual_seed(0 )
        snake_case_ = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model

    def A_ ( self : Dict ):
        """Run the pipeline for two steps with a fixed seed, in both dict and
        tuple return styles, and compare a corner slice of the output image
        against hard-coded expected values."""
        snake_case_ = self.dummy_uncond_unet
        snake_case_ = ScoreSdeVeScheduler()
        snake_case_ = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        sde_ve.to(lowercase_ )
        sde_ve.set_progress_bar_config(disable=lowercase_ )
        snake_case_ = torch.manual_seed(0 )
        snake_case_ = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=lowercase_ ).images
        snake_case_ = torch.manual_seed(0 )
        snake_case_ = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=lowercase_ , return_dict=lowercase_ )[
            0
        ]
        snake_case_ = image[0, -3:, -3:, -1]
        snake_case_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        snake_case_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class a ( unittest.TestCase ):
    """Slow integration test against the hosted google/ncsnpp-church-256
    checkpoint (downloads weights; requires torch).

    NOTE(review): as above, locals are bound to `snake_case_` but read back
    under other names — verify before relying on this suite.
    """

    def A_ ( self : Optional[int] ):
        """Generate one 256x256 image in 10 steps with a fixed seed and compare
        a corner slice against hard-coded expected values."""
        snake_case_ = '''google/ncsnpp-church-256'''
        snake_case_ = UNetaDModel.from_pretrained(lowercase_ )
        snake_case_ = ScoreSdeVeScheduler.from_pretrained(lowercase_ )
        snake_case_ = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_ )
        sde_ve.to(lowercase_ )
        sde_ve.set_progress_bar_config(disable=lowercase_ )
        snake_case_ = torch.manual_seed(0 )
        snake_case_ = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=lowercase_ ).images
        snake_case_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        snake_case_ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 72 | 1 |
"""Project Euler problem 173: count hollow square laminae buildable from up
to a given number of unit tiles."""
from math import ceil, isqrt, sqrt


def solution(limit: int = 1000000) -> int:
    """Count the square laminae (square frames with a square hole, both with
    the same centre) that use at most `limit` tiles.

    Renamed from `a__` — the `__main__` block below calls `solution()`; the
    return annotation is corrected from `str` to `int`, and the lower bound
    uses integer `isqrt` so it stays exact for large widths.

    >>> solution(100)
    41
    """
    answer = 0
    # A lamina of outer width w needs at least 4 * (w - 1) tiles, so
    # w <= limit // 4 + 1.
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            # Smallest admissible hole width is ceil(sqrt(w^2 - limit));
            # 1 + isqrt(x - 1) == ceil(sqrt(x)) exactly for integer x >= 1.
            hole_width_lower_bound = max(1 + isqrt(outer_width**2 - limit - 1), 1)
        else:
            hole_width_lower_bound = 1
        # Hole width must have the same parity as the outer width.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
| 324 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    """A single binary-search-tree node holding `value` and its links.

    Renamed from the duplicate class name `_UpperCamelCase` — the tree's
    insert method constructs `Node(value)`.
    """

    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        """Render a leaf as its value, otherwise a nested pprint mapping."""
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    """An unbalanced binary search tree.

    Renamed from the duplicate class name `_UpperCamelCase` — the demo below
    constructs `BinarySearchTree()`. Fixes versus the previous revision: all
    methods shared one degraded name (each shadowing the last) and every
    `self.*` / local assignment target had been lost; both are restored.
    """

    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node, new_children):
        """Splice `new_children` into the position currently held by `node`."""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        """Return True if `node` is its parent's right child."""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        return self.root is None

    def __insert(self, value):
        """Insert one value, descending from the root to a free leaf slot."""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        """Insert each of `values` in order."""
        for value in values:
            self.__insert(value)

    def search(self, value):
        """Return the node holding `value`, or None if absent.

        Raises IndexError when the tree is empty.
        """
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        """Return the right-most (maximum) node below `node` (default: root)."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        """Return the left-most (minimum) node below `node` (default: root)."""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value):
        """Delete the node holding `value`, if present, preserving BST order."""
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                # Replace with the predecessor: max of the left subtree.
                tmp_node = self.get_max(node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node):
        """Yield nodes in preorder (root, left, right)."""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        """Traverse with `traversal_function(root)`, defaulting to preorder."""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node):
        """Append the subtree's values to `arr` in sorted (inorder) order."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k, node):
        """Return the k-th smallest value (1-based) in the subtree at `node`."""
        arr = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def lowerCAmelCase_ ( _lowerCamelCase: "Node | None" ):
    """Return the nodes of the tree rooted at ``_lowerCamelCase`` in post-order.

    Fix: the body referenced undefined names (``curr_node`` and a ``postorder``
    recursion target that no longer exists after the renames), so the function
    is made self-contained with a local recursive helper.  The annotation is
    quoted because ``Node`` is defined elsewhere in this module.
    """

    def _postorder(curr_node):
        # left subtree, right subtree, then the node itself
        if curr_node is None:
            return []
        return _postorder(curr_node.left ) + _postorder(curr_node.right ) + [curr_node]

    return _postorder(_lowerCamelCase )
def lowerCAmelCase_ ( ):
    """Small demo exercising the BinarySearchTree: insert a fixed list of
    values, query a few, print min/max, then remove everything.

    Fix: the loop bodies referenced an undefined name ``_lowerCamelCase``
    instead of the loop variable ``i``, and the prints received that same
    undefined name.
    """
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    # NOTE(review): relies on BinarySearchTree defining a useful __str__ — confirm.
    print(t )
    if t.search(6 ) is not None:
        print("""The value 6 exists""" )
    else:
        print("""The value 6 doesn't exist""" )
    if t.search(-1 ) is not None:
        print("""The value -1 exists""" )
    else:
        print("""The value -1 doesn't exist""" )
    if not t.empty():
        print("""Max Value: """ , t.get_max().value )  # type: ignore
        print("""Min Value: """ , t.get_min().value )  # type: ignore
    for i in testlist:
        t.remove(i )
        print(t )
if __name__ == "__main__":
    import doctest

    # Fix: stray dataset residue ("| 112 | 0 |") fused onto this line was a
    # SyntaxError; removed.
    doctest.testmod(verbose=True)
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Sanity checks for the activation factory ``get_activation``.

    Fixes: the asserts referenced an undefined name ``a__`` (the local is the
    activation module), ``torch.floataa`` is not a real dtype (float32
    restored), and all four methods shared one name — unittest needs distinct
    ``test_*`` names for discovery.
    """

    def test_swish(self):
        # swish resolves to nn.SiLU: ~0 for very negative x, identity-ish for large x
        act = get_activation('''swish''' )
        self.assertIsInstance(act, nn.SiLU )
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32 ) ).item(), 0 )
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32 ) ).item(), 0 )
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32 ) ).item(), 0 )
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32 ) ).item(), 20 )

    def test_silu(self):
        # "silu" is an alias of swish and must map to the same module
        act = get_activation('''silu''' )
        self.assertIsInstance(act, nn.SiLU )
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32 ) ).item(), 0 )
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32 ) ).item(), 0 )
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32 ) ).item(), 0 )
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32 ) ).item(), 20 )

    def test_mish(self):
        act = get_activation('''mish''' )
        self.assertIsInstance(act, nn.Mish )
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32 ) ).item(), 0 )
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32 ) ).item(), 0 )
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32 ) ).item(), 0 )
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32 ) ).item(), 20 )

    def test_gelu(self):
        act = get_activation('''gelu''' )
        self.assertIsInstance(act, nn.GELU )
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32 ) ).item(), 0 )
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32 ) ).item(), 0 )
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32 ) ).item(), 0 )
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32 ) ).item(), 20 )
| 359 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
    r"""Processor wrapping an M-CTC-T feature extractor and a tokenizer.

    Fixes applied to the obfuscated original:
    * ``__init__`` declared two parameters with one name (a SyntaxError) and
      dropped ``current_processor`` / ``_in_target_context_manager`` into
      throwaway locals, although both attributes are read elsewhere;
    * the two class attributes below shared one name so one overwrote the
      other — ProcessorMixin looks them up as ``feature_extractor_class`` and
      ``tokenizer_class``;
    * the four public methods all shared one name, so only the last survived.
    """

    feature_extractor_class = '''MCTCTFeatureExtractor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__( self , feature_extractor , tokenizer ):
        """Store both sub-processors; feature extraction is the default mode."""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__( self , *args , **kwargs ):
        """Run the feature extractor on ``audio`` and/or the tokenizer on ``text``.

        When both are given, the tokenized ids are attached as ``labels``.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            # first positional argument is the audio; forward the rest
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs

    def batch_decode( self , *args , **kwargs ):
        """Delegate to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def pad( self , *args , **kwargs ):
        """Pad ``input_features`` via the feature extractor and/or ``labels``
        via the tokenizer; attaches padded label ids when both are given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop('''input_features''' , None )
        labels = kwargs.pop('''labels''' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['''labels'''] = labels['''input_ids''']
            return input_features

    def decode( self , *args , **kwargs ):
        """Delegate to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @contextmanager
    def as_target_processor( self ):
        """Temporarily route bare calls to the tokenizer for label processing."""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 39 | 0 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ ,
    ROBERTA_START_DOCSTRING ,
)
class A_ (DeeBertModel ):
    """DeeBERT early-exit backbone driven by a RoBERTa configuration.

    Fixes: the base class and docstring constant were both scrambled to the
    undefined name ``lowercase__`` (the real names are imported at the top of
    this file), the two class attributes shared one name so one overwrote the
    other, and the embeddings were assigned to a throwaway local instead of
    the module attribute.
    """

    config_class = RobertaConfig
    base_model_prefix = """roberta"""

    def __init__( self , lowercase_ ):
        """Build the DeeBERT model, swap in RoBERTa embeddings, init weights."""
        super().__init__(lowercase_ )
        self.embeddings = RobertaEmbeddings(lowercase_ )
        self.init_weights()
# NOTE(review): this file was mechanically obfuscated.  In this class every
# assignment was rewritten to `UpperCAmelCase_ : <type> = ...`, so values that
# were clearly meant to become instance attributes (num_labels, num_layers,
# roberta backbone, dropout, classifier) are discarded into one reused local,
# while forward() reads `self.num_layers`, `self.roberta`, `self.dropout`,
# `self.classifier`, `self.num_labels` — those reads will fail at runtime.
# The forward signature also declares nine parameters all named `lowercase_`
# (a SyntaxError), and the body references names (`config`, `outputs`,
# `logits`, `labels`, `loss_fct`, `highway_losses`, `train_highway`,
# `output_layer`, ...) that the scrambling disconnected from any binding.
# Code left byte-identical; flagged for reconstruction against the upstream
# DeeBERT `modeling_highway_roberta.py`.
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """ ,lowercase__ ,)
class A_ (lowercase__ ):
    '''DeeRoBERTa sequence classifier with highway (early-exit) heads.'''
    SCREAMING_SNAKE_CASE__ : Optional[Any] = RobertaConfig
    SCREAMING_SNAKE_CASE__ : Optional[int] = """roberta"""
    def __init__( self , lowercase_ ):
        """Build the backbone, dropout and per-class linear head from the config."""
        super().__init__(lowercase_ )
        # NOTE(review): `config` is undefined here — the parameter is `lowercase_`.
        UpperCAmelCase_ : Any = config.num_labels
        UpperCAmelCase_ : Optional[Any] = config.num_hidden_layers
        UpperCAmelCase_ : int = DeeRobertaModel(lowercase_ )
        UpperCAmelCase_ : Tuple = nn.Dropout(config.hidden_dropout_prob )
        UpperCAmelCase_ : Dict = nn.Linear(config.hidden_size , self.config.num_labels )
    @add_start_docstrings_to_model_forward(lowercase_ )
    def UpperCamelCase__ ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=-1 , lowercase_=False , ):
        """Forward pass with optional early exit; returns (loss), logits and,
        at eval time, per-layer highway entropies and the exit layer index."""
        UpperCAmelCase_ : str = self.num_layers
        try:
            UpperCAmelCase_ : str = self.roberta(
                lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , position_ids=lowercase_ , head_mask=lowercase_ , inputs_embeds=lowercase_ , )
            UpperCAmelCase_ : List[str] = outputs[1]
            UpperCAmelCase_ : Optional[Any] = self.dropout(lowercase_ )
            UpperCAmelCase_ : List[str] = self.classifier(lowercase_ )
            UpperCAmelCase_ : Tuple = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # an intermediate highway head was confident enough to exit early
            UpperCAmelCase_ : Optional[int] = e.message
            UpperCAmelCase_ : Optional[Any] = e.exit_layer
            UpperCAmelCase_ : List[str] = outputs[0]
        if not self.training:
            UpperCAmelCase_ : Optional[int] = entropy(lowercase_ )
            UpperCAmelCase_ : List[str] = []
            UpperCAmelCase_ : Dict = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                UpperCAmelCase_ : int = MSELoss()
                UpperCAmelCase_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                UpperCAmelCase_ : Dict = CrossEntropyLoss()
                UpperCAmelCase_ : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            UpperCAmelCase_ : List[str] = []
            for highway_exit in outputs[-1]:
                UpperCAmelCase_ : int = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(lowercase_ )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    UpperCAmelCase_ : str = MSELoss()
                    UpperCAmelCase_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    UpperCAmelCase_ : List[Any] = CrossEntropyLoss()
                    UpperCAmelCase_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(lowercase_ )
            if train_highway:
                UpperCAmelCase_ : Tuple = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                UpperCAmelCase_ : Any = (loss,) + outputs
        if not self.training:
            UpperCAmelCase_ : Any = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                UpperCAmelCase_ : List[str] = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 61 |
from __future__ import annotations
def lowerCamelCase__ ( possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ):
    '''Recursive backtracking core of the N-queens solver.

    ``possible_board[r]`` is the column of the queen in row ``r``; completed
    boards are appended to ``boards`` as lists of row strings.

    Fixes: all five parameters previously shared the name ``A__`` (a
    SyntaxError), and the recursive call targeted the undefined name
    ``depth_first_search``; the function now recurses on itself.
    '''
    row = len(possible_board )
    # A queen has been placed in every row: render and record the board,
    # e.g. [1, 3, 0, 2] -> ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
    if row == n:
        boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
        return
    # Try each column in this row.  A placement collides when the column is
    # already used (vertical) or either diagonal invariant is violated:
    #   45 degrees:  row - col  is constant along the diagonal
    #   135 degrees: row + col  is constant along the anti-diagonal
    for col in range(n ):
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # No collision: recurse with the extended partial solution.
        lowerCamelCase__(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def lowerCamelCase__ ( A__: int ):
    '''Solve N-queens for a board of size ``A__``: print every solution board
    followed by a blank line, then a summary count.

    Fixes: the original called the undefined name ``depth_first_search`` and
    passed the board size where the result list was expected; a local
    backtracking helper makes the function self-contained.
    '''
    boards: list[list[str]] = []

    def _dfs(possible_board , diag_right , diag_left ):
        # Place one queen per row, pruning column and diagonal collisions.
        row = len(possible_board )
        if row == A__:
            boards.append([""". """ * i + """Q """ + """. """ * (A__ - 1 - i) for i in possible_board] )
            return
        for col in range(A__ ):
            if (
                col in possible_board
                or row - col in diag_right
                or row + col in diag_left
            ):
                continue
            _dfs([*possible_board, col] , [*diag_right, row - col] , [*diag_left, row + col] )

    _dfs([] , [] , [] )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print("""""" )
    print(len(boards ) , """solutions were found.""" )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: ``n_queens_solution`` no longer exists after the renames; call the
    # surviving module-level binding (the solver defined just above).
    lowerCamelCase__(4)
| 12 | 0 |
'''simple docstring'''
def lowerCamelCase (hubble_constant: float , radiation_density: float , matter_density: float , dark_energy: float , redshift: float , ) -> float:
    '''Hubble parameter H(z) from the Friedmann equation.

    H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL), where the
    curvature density Ok is inferred from the other relative densities.

    Fixes: all five parameters previously shared the name
    ``_SCREAMING_SNAKE_CASE`` (a SyntaxError); the names the body and the
    keyword call in the ``__main__`` demo use are restored.

    Raises:
        ValueError: if any parameter is negative, or a relative density
            exceeds one.
    '''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        # flat-universe closure: whatever density is left over is curvature
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    # Fixes: ``hubble_parameter`` was renamed to ``lowerCamelCase`` by the
    # obfuscation, ``matter_density`` referred to itself before binding (the
    # variable is ``__lowercase``), and the ``Any`` annotation was unimported.
    __lowercase = 0.3
    print(
        lowerCamelCase(
            hubble_constant=68.3,
            radiation_density=1E-4,
            matter_density=__lowercase,
            dark_energy=1 - __lowercase,
            redshift=0,
        )
    )
| 360 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCamelCase ( unittest.TestCase ):
    """Round-trip and consistency tests for ClapProcessor.

    Fixes applied to the obfuscated original: ``setUp`` discarded the
    checkpoint name and temp directory into throwaway locals although every
    other method reads ``self.checkpoint`` / ``self.tmpdirname``; all methods
    shared one mangled name (breaking unittest discovery and the
    setUp/tearDown hooks); and test bodies referenced undefined locals
    (``tokenizer``, ``feature_extractor``, ...) and undefined class operands.
    """

    def setUp(self):
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )

    def tearDown(self):
        shutil.rmtree(self.tmpdirname )

    def test_save_load_pretrained_default(self):
        # save/reload must preserve the tokenizer vocab and extractor config
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_save_load_pretrained_additional_features(self):
        # extra kwargs passed to from_pretrained must override saved settings
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_feature_extractor(self):
        # processor(audios=...) must agree with calling the extractor directly
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors='np' )
        input_processor = processor(audios=raw_speech , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer(self):
        # processor(text=...) must agree with calling the tokenizer directly
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 294 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowerCAmelCase_ ( model_name ):
    """Build a SwinConfig (192px SimMIM pre-training) for a base/large checkpoint.

    Fixes: the parameter was scrambled away from the ``model_name`` the body
    tests, and the four architecture fields were assigned to one reused
    throwaway local (leaving ``window_size`` undefined) instead of being set
    on the config object that is returned.

    Raises:
        ValueError: for model names that are neither base nor large variants.
    """
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def lowerCAmelCase_ ( __UpperCAmelCase ):
    """Map one SimMIM checkpoint key to the HF Swin naming scheme.

    Decoder keys pass through unchanged; every other key gets the module
    renames below plus a ``swin.`` prefix.

    Fix: the body referred to an undefined name ``name``; it is bound from
    the (obfuscated) parameter up front.
    """
    name = __UpperCAmelCase
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
# NOTE(review): obfuscation damage — both parameters share the name
# ``__UpperCAmelCase`` (a SyntaxError), every assignment targets the one
# reused local ``UpperCamelCase__`` so the split q/k/v slices are discarded,
# and the body references names with no binding (``orig_state_dict``,
# ``key_split``, ``val``, ``model``, ``layer_num``, ``block_num``, ``dim``).
# The intended contract (from the structure): pop each key from the original
# state dict, split fused qkv weights/biases into query/key/value thirds
# sized by the corresponding attention head size, and re-insert everything
# under renamed keys.  Left byte-identical; needs reconstruction.
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: Any ) -> str:
    """Convert a SimMIM state dict to HF Swin layout (qkv keys are split)."""
    for key in orig_state_dict.copy().keys():
        UpperCamelCase__ : List[str] = orig_state_dict.pop(__UpperCAmelCase )
        if "attn_mask" in key:
            # attention masks are buffers the HF model rebuilds; drop them
            pass
        elif "qkv" in key:
            UpperCamelCase__ : Optional[Any] = key.split('''.''' )
            UpperCamelCase__ : List[str] = int(key_split[2] )
            UpperCamelCase__ : Tuple = int(key_split[4] )
            UpperCamelCase__ : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # fused (3*dim, dim) weight -> query / key / value row blocks
                UpperCamelCase__ : Optional[int] = val[:dim, :]
                UpperCamelCase__ : List[str] = val[
                    dim : dim * 2, :
                ]
                UpperCamelCase__ : Tuple = val[-dim:, :]
            else:
                # fused (3*dim,) bias -> query / key / value thirds
                UpperCamelCase__ : Union[str, Any] = val[
                    :dim
                ]
                UpperCamelCase__ : int = val[
                    dim : dim * 2
                ]
                UpperCamelCase__ : str = val[
                    -dim:
                ]
        else:
            UpperCamelCase__ : Optional[int] = val
    return orig_state_dict
# NOTE(review): obfuscation damage — all four parameters share the name
# ``__UpperCAmelCase`` (a SyntaxError; the argparse call below passes
# model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub in that
# order), every result is assigned to one reused local, and the helpers it
# calls (``get_swin_config``, ``convert_state_dict``) were renamed away.
# Later reads (``pytorch_dump_folder_path``, ``model_name``, ``push_to_hub``,
# ``outputs``, ``image_processor``) have no binding.  Left byte-identical.
def lowerCAmelCase_ ( __UpperCAmelCase: Dict , __UpperCAmelCase: str , __UpperCAmelCase: Optional[int] , __UpperCAmelCase: Dict ) -> Union[str, Any]:
    """Load a SimMIM checkpoint, convert it to HF Swin, smoke-test on one
    image, and optionally save locally and/or push to the Hub."""
    UpperCamelCase__ : Optional[Any] = torch.load(__UpperCAmelCase , map_location='''cpu''' )['''model''']
    UpperCamelCase__ : Optional[Any] = get_swin_config(__UpperCAmelCase )
    UpperCamelCase__ : Union[str, Any] = SwinForMaskedImageModeling(__UpperCAmelCase )
    model.eval()
    UpperCamelCase__ : Optional[Any] = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
    model.load_state_dict(__UpperCAmelCase )
    # smoke test: run the converted model on the standard COCO cats image
    UpperCamelCase__ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    UpperCamelCase__ : Dict = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    UpperCamelCase__ : Optional[Any] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
    UpperCamelCase__ : List[Any] = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' )
    with torch.no_grad():
        UpperCamelCase__ : Optional[Any] = model(**__UpperCAmelCase ).logits
    print(outputs.keys() )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(__UpperCAmelCase )
        print(f"Saving image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(__UpperCAmelCase )
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub" )
        model.push_to_hub(f"microsoft/{model_name}" )
        image_processor.push_to_hub(f"microsoft/{model_name}" )
# CLI entry point: parse the conversion arguments and run the converter.
# NOTE(review): ``convert_swin_checkpoint`` no longer exists under that name
# after the obfuscation (the converter above is called ``lowerCAmelCase_``),
# so this call will raise NameError when the script is executed — confirm and
# repoint once the function names are restored.
if __name__ == "__main__":
    UpperCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='swin-base-simmim-window6-192',
        type=str,
        choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
        help='Name of the Swin SimMIM model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path',
        default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
        type=str,
        help='Path to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    # NOTE(review): the parser was bound to the throwaway name ``UpperCAmelCase_``
    # above, so the ``parser.add_argument`` calls and ``parser.parse_args()``
    # below reference an undefined name — confirm and rebind.
    UpperCAmelCase_ = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 201 |
'''simple docstring'''
class UpperCAmelCase_ :
    """Prefix-sum index over a list of ints.

    Precomputes cumulative sums so that any contiguous-range sum is O(1) and
    "does any subarray sum to t?" is O(n).

    Fixes applied to the obfuscated original: ``__init__`` discarded every
    computed value into a throwaway local (so ``self.prefix_sum`` never
    existed), the two query methods shared one mangled name and the range
    method declared two parameters with one name (a SyntaxError), and the
    subarray check added the *target* to the seen-set instead of the running
    sum.  Conventional method names are restored.
    """

    def __init__( self , array: list[int] ) -> None:
        """Build the prefix-sum table of ``array`` in O(n)."""
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum( self , start: int , end: int ) -> int:
        """Return the sum of ``array[start..end]`` (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum( self , target_sum: int ) -> bool:
        """Return True iff some contiguous subarray sums to ``target_sum``."""
        # classic trick: sums[j] - sums[i] == target for some earlier prefix i
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 4 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: str , __lowerCamelCase: float | Decimal , __lowerCamelCase: float = 10**-10 ):
'''simple docstring'''
lowercase_ = a
while True:
lowercase_ = Decimal(__lowerCamelCase ) - (
Decimal(eval(__lowerCamelCase ) ) / Decimal(eval(str(diff(__lowerCamelCase ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__lowerCamelCase ) ) < precision: # noqa: S307
return float(__lowerCamelCase )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
# Find Square Root of 5
print(f"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 360 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 1
@register_to_config
def __init__( self , UpperCAmelCase = 1000 , UpperCAmelCase = None ) -> List[Any]:
'''simple docstring'''
self.set_timesteps(UpperCAmelCase )
# standard deviation of the initial noise distribution
lowercase_ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
lowercase_ = 4
# running values
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Optional[int]:
'''simple docstring'''
lowercase_ = num_inference_steps
lowercase_ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
lowercase_ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
lowercase_ = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
lowercase_ = torch.sin(steps * math.pi / 2 ) ** 2
lowercase_ = (1.0 - self.betas**2) ** 0.5
lowercase_ = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
lowercase_ = timesteps.to(UpperCAmelCase )
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
lowercase_ = (self.timesteps == timestep).nonzero().item()
lowercase_ = timestep_index + 1
lowercase_ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCAmelCase )
if len(self.ets ) == 1:
lowercase_ = self.ets[-1]
elif len(self.ets ) == 2:
lowercase_ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowercase_ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
lowercase_ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
lowercase_ = self._get_prev_sample(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = self.alphas[timestep_index]
lowercase_ = self.betas[timestep_index]
lowercase_ = self.alphas[prev_timestep_index]
lowercase_ = self.betas[prev_timestep_index]
lowercase_ = (sample - sigma * ets) / max(UpperCAmelCase , 1e-8 )
lowercase_ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
| 297 | 0 |
"""simple docstring"""
class __A :
    """Prefix-sum index over a list of ints (O(1) range sums, O(n) subarray
    target check).

    Fixes applied to the obfuscated original: ``__init__`` discarded the
    table into a local ``a`` (so ``self.prefix_sum`` never existed and
    ``len_array`` was unbound), both query methods shared one name so the
    second silently shadowed the first, the range method declared two
    parameters with one name (a SyntaxError), and the parameter name ``__A``
    collided with the class name under private-name mangling.  Conventional
    names are restored.
    """

    def __init__( self , array ) -> None:
        """Build the prefix-sum table of ``array`` in O(n)."""
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum( self , start , end ) -> int:
        """Return the sum of ``array[start..end]`` (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum( self , target_sum ) -> bool:
        """Return True iff some contiguous subarray sums to ``target_sum``."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
    import doctest

    # Fix: stray dataset residue ("| 81 |") fused onto this line was a
    # SyntaxError; removed.
    doctest.testmod()
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _A ( ):
    """Parse command-line arguments for the TPU multi-process launcher.

    Expected CLI shape: ``--num_cores N training_script [script args...]``.

    Fixes: the parser was bound to the throwaway local ``a`` while the
    subsequent calls used ``parser``, and ``type``/``nargs`` pointed at the
    undefined name ``lowercase`` — the launcher needs an int core count, a
    str script path, and REMAINDER pass-through arguments.
    """
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch '''
            '''helper utility that will spawn up '''
            '''multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' , type=int , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
    # positional
    parser.add_argument(
        '''training_script''' , type=str , help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ) , )
    # rest from the training program
    parser.add_argument('''training_script_args''' , nargs=REMAINDER )
    return parser.parse_args()
# NOTE(review): obfuscation damage — this redefinition of ``_A`` shadows the
# argument parser above, the call to ``parse_args`` has no binding (it was
# renamed to ``_A`` too), every result is discarded into the local ``a``, and
# the later reads (``args``, ``script_fpath``, ``lowercase``, ``mod``) are
# undefined.  Left byte-identical; needs the original parse_args/main names
# restored before it can run.
def _A ( ):
    """Launcher entry point: import the training script as a module and spawn
    it on the requested number of TPU cores via torch_xla's xmp.spawn."""
    a =parse_args()
    # Import training_script as a module.
    a =Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    a =script_fpath.stem
    a =importlib.import_module(lowercase )
    # Patch sys.argv
    a =[args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    # Fixes: stray dataset residue ("| 81 | 1 |") fused onto this line was a
    # SyntaxError, and ``main`` no longer exists — both helpers above were
    # renamed to ``_A``, so the surviving binding (the launcher routine) is
    # the entry point.
    _A()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class A_ ( lowerCamelCase_ ):
    """Configuration class for I-BERT (integer-only BERT/RoBERTa).

    Fixes applied to the obfuscated original: all eighteen ``__init__``
    parameters shared one name (a SyntaxError — the defaults below preserve
    the original values and order), ``super().__init__`` was fed an undefined
    name, every field was discarded into a throwaway local instead of being
    stored on the instance, and the registration attribute was renamed —
    PretrainedConfig subclasses declare it as ``model_type``.
    """

    model_type = """ibert"""

    def __init__(
        self ,
        vocab_size=30_522 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3_072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=2 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-12 ,
        pad_token_id=1 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        position_embedding_type="absolute" ,
        quant_mode=False ,
        force_dequant="none" ,
        **kwargs ,
    ):
        """Store the transformer hyper-parameters and quantization options."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # integer-only inference switches specific to I-BERT
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class A_ ( lowerCamelCase_ ):
    """ONNX export configuration for I-BERT.

    Fix: stray dataset residue ("| 366 |") fused onto the closing line was a
    SyntaxError; removed.  Behavior is otherwise unchanged.
    """

    @property
    def _lowercase ( self: str ):
        """Dynamic-axis specification for the exporter's model inputs."""
        # multiple-choice inserts a `choice` axis between batch and sequence
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def lowerCamelCase_( shape , vocab_size , rng=None ) -> np.ndarray:
    """Create a random integer tensor of ``shape`` with values in ``[0, vocab_size)``.

    Fixes the machine-mangled original, whose parameters were all named
    ``_lowerCamelCase`` (a duplicate-argument SyntaxError) and whose dtype was
    the nonexistent ``jnp.intaa`` (restored to ``jnp.int32``).

    Args:
        shape: output tensor shape (iterable of ints).
        vocab_size: exclusive upper bound for the sampled token ids.
        rng: optional ``random.Random`` for reproducibility; a fresh one is
            created when omitted.

    Returns:
        ``np.ndarray`` of dtype int32 with the requested shape.
    """
    if rng is None:
        rng = random.Random()
    # Sample one id per element of the flattened tensor, then reshape.
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = [rng.randint(0 , vocab_size - 1 ) for _ in range(total_dims )]
    return np.array(values , dtype=jnp.int32 ).reshape(shape )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]:
    """Build a random 0/1 attention mask of the given shape.

    NOTE(review): machine-mangled — duplicate parameter names (SyntaxError), a
    call to ``ids_tensor`` that is not defined in this file (evidently the
    id-sampling helper above), and the forced-attention assignment target was
    lost. Restore the original names before use.
    """
    _lowerCamelCase : Optional[int] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase )
    # make sure that at least one token is attended to for each batch
    _lowerCamelCase : List[str] = 1
    return attn_mask
@require_flax
class A_ :
    """Mixin exercising Flax ``generate()`` over ``self.all_generative_model_classes``.

    NOTE(review): this block is machine-mangled. Multi-target lines such as
    ``_lowerCamelCase, _lowerCamelCase : T = ...`` are SyntaxErrors, every local
    is bound to ``_lowerCamelCase`` while later reads use the lost original
    names (``config``, ``input_ids``, ``max_length``, ...), and call arguments
    were replaced with the undefined ``__lowerCAmelCase``. The docstrings below
    describe the evident intent only; restore real names before running.
    """
    # Mangled: originally two distinct class attributes (the second assignment
    # clobbers the first); ``self.model_tester`` usage below suggests one of
    # them was the model tester, the other the generative model class tuple.
    lowerCAmelCase__ = None
    lowerCAmelCase__ = ()
    def _lowercase ( self: Any ):
        """Prepare (config, input_ids, attention_mask, max_length): inputs cut to
        half length, batch capped at 2, and up to 5 new tokens requested."""
        _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        _lowerCamelCase : List[str] = 2
        _lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2
        _lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length]
        _lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase )
        _lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        _lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            _lowerCamelCase : List[str] = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def _lowercase ( self: Tuple ):
        """Cross-check Flax ``generate`` against its PyTorch twin (weights copied
        via ``load_flax_weights_in_pytorch_model``); token lists must match."""
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config()
        _lowerCamelCase : List[Any] = False
        _lowerCamelCase : Dict = max_length
        _lowerCamelCase : Tuple = 0
        for model_class in self.all_generative_model_classes:
            _lowerCamelCase : str = model_class(__lowerCAmelCase )
            _lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
            _lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase )
            _lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval()
            _lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params )
            _lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences
            _lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) )
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                _lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
    def _lowercase ( self: Optional[Any] ):
        """Eager vs jitted ``generate`` must produce identical sequences (flag set
        to False — presumably ``do_sample``; confirm against the original)."""
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config()
        _lowerCamelCase : Union[str, Any] = False
        _lowerCamelCase : Union[str, Any] = max_length
        for model_class in self.all_generative_model_classes:
            _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
            _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
            _lowerCamelCase : Dict = jit(model.generate )
            _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def _lowercase ( self: Tuple ):
        """Same eager-vs-jit equivalence with the flag set to True (sampling)."""
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
        _lowerCamelCase : List[Any] = True
        _lowerCamelCase : Optional[int] = max_length
        for model_class in self.all_generative_model_classes:
            _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
            _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
            _lowerCamelCase : Dict = jit(model.generate )
            _lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def _lowercase ( self: Union[str, Any] ):
        """Eager-vs-jit equivalence with an extra constant 2 set (presumably
        ``num_beams``; confirm against the original)."""
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
        _lowerCamelCase : int = False
        _lowerCamelCase : Optional[Any] = max_length
        _lowerCamelCase : Dict = 2
        for model_class in self.all_generative_model_classes:
            _lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
            _lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
            _lowerCamelCase : Tuple = jit(model.generate )
            _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def _lowercase ( self: List[Any] ):
        """Output batch must equal input batch times ``config.num_return_sequences``."""
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config()
        _lowerCamelCase : Tuple = False
        _lowerCamelCase : Union[str, Any] = max_length
        _lowerCamelCase : List[str] = 2
        _lowerCamelCase : Optional[int] = 2
        for model_class in self.all_generative_model_classes:
            _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
            _lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
    def _lowercase ( self: List[Any] ):
        """Eager-vs-jit equivalence with sampling-style constants (0.8, 10, 0.3, ...;
        presumably temperature/top-k/top-p and penalties — confirm)."""
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config()
        _lowerCamelCase : int = True
        _lowerCamelCase : List[Any] = max_length
        _lowerCamelCase : Optional[Any] = 0.8
        _lowerCamelCase : Union[str, Any] = 10
        _lowerCamelCase : List[str] = 0.3
        _lowerCamelCase : Tuple = 1
        _lowerCamelCase : Any = 8
        _lowerCamelCase : str = 9
        for model_class in self.all_generative_model_classes:
            _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
            _lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
            _lowerCamelCase : int = jit(model.generate )
            _lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def _lowercase ( self: Optional[int] ):
        """Eager-vs-jit equivalence with constants 1/8/9 (presumably length/eos
        related — confirm against the original)."""
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config()
        _lowerCamelCase : List[str] = max_length
        _lowerCamelCase : Tuple = 1
        _lowerCamelCase : Any = 8
        _lowerCamelCase : Dict = 9
        for model_class in self.all_generative_model_classes:
            _lowerCamelCase : Any = model_class(__lowerCAmelCase )
            _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
            _lowerCamelCase : Any = jit(model.generate )
            _lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def _lowercase ( self: List[str] ):
        """Same as above with an additional constant 2 (presumably ``num_beams``)."""
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config()
        _lowerCamelCase : Dict = max_length
        _lowerCamelCase : List[Any] = 2
        _lowerCamelCase : Tuple = 1
        _lowerCamelCase : List[str] = 8
        _lowerCamelCase : List[Any] = 9
        for model_class in self.all_generative_model_classes:
            _lowerCamelCase : int = model_class(__lowerCAmelCase )
            _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
            _lowerCamelCase : Tuple = jit(model.generate )
            _lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def _lowercase ( self: Dict ):
        """Generation with a left-padded (partially zeroed) attention mask; eager
        and jitted outputs must match."""
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config()
        # pad attention mask on the left
        _lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 )
        _lowerCamelCase : Dict = False
        _lowerCamelCase : Any = max_length
        for model_class in self.all_generative_model_classes:
            _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
            _lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
            _lowerCamelCase : Any = jit(model.generate )
            _lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def _lowercase ( self: Optional[Any] ):
        """Left-padded attention mask with the sampling flag set to True."""
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config()
        # pad attention mask on the left
        _lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
        _lowerCamelCase : List[str] = True
        _lowerCamelCase : Optional[Any] = max_length
        for model_class in self.all_generative_model_classes:
            _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
            _lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
            _lowerCamelCase : Any = jit(model.generate )
            _lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def _lowercase ( self: Union[str, Any] ):
        """Left-padded attention mask with the constant 2 set (presumably beams)."""
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config()
        # pad attention mask on the left
        _lowerCamelCase : List[str] = attention_mask.at[(0, 0)].set(0 )
        _lowerCamelCase : int = 2
        _lowerCamelCase : int = max_length
        for model_class in self.all_generative_model_classes:
            _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
            _lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
            _lowerCamelCase : Dict = jit(model.generate )
            _lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class A_ ( unittest.TestCase ):
    def _lowercase ( self: Any ):
        """``generate`` must reject typo'd and arbitrary unknown keyword arguments.

        NOTE(review): machine-mangled — call arguments were replaced with the
        undefined ``__lowerCAmelCase`` (e.g. the expected exception type for
        ``assertRaisesRegex`` and the tokenized input ids); restore before use.
        """
        _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
        _lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        _lowerCamelCase : Optional[Any] = "Hello world"
        _lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ):
            model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ):
            _lowerCamelCase : List[str] = {"foo": "bar"}
            model.generate(__lowerCAmelCase ,**__lowerCAmelCase ) | 340 | 0 |
import math
def A_ ( number ):
    """Return the ``number``-th Proth number (OEIS A080075): 3, 5, 9, 13, 17, 25, ...

    Restores the machine-mangled original, which (a) tested
    ``isinstance(number, number)`` instead of ``isinstance(number, int)``,
    (b) raised the exceptions with the input value instead of the prepared
    message, and (c) lost the local names (the loop bounds had been replaced
    by the input itself).

    >>> A_(6)
    25

    Args:
        number: 1-based index into the Proth sequence.

    Returns:
        The ``number``-th Proth number as an int.

    Raises:
        TypeError: if ``number`` is not an integer.
        ValueError: if ``number`` is smaller than 1.
    """
    if not isinstance(number , int ):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message )
    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in blocks: within block ``b`` consecutive values
        # differ by 2**(b + 1), and block sizes double (3, 6, 12, ...).
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Demo loop printing the first Proth numbers (index 0 exercises the error path).
    # NOTE(review): machine-mangled — ``proth`` and ``value`` are undefined here
    # (the function above is named ``A_`` and its result is bound to
    # ``lowerCAmelCase``); restore the original names before running.
    for number in range(11):
        lowerCAmelCase : List[str] = 0
        try:
            lowerCAmelCase : List[str] = proth(number)
        except ValueError:
            print(f'''ValueError: there is no {number}th Proth number''')
            continue
        print(f'''The {number}th Proth number: {value}''')
| 13 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase_( unittest.TestCase ):
    '''Integration tests comparing XLM-RoBERTa hidden states against reference slices.

    NOTE(review): machine-mangled — locals are all bound to ``lowerCAmelCase__``
    and the model is invoked as ``model(__UpperCAmelCase )`` where
    ``__UpperCAmelCase`` is undefined (evidently the ``input_ids`` tensor built
    above); restore the real names before running.
    '''
    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        """xlm-roberta-base: check output shape (1, 12, 768) and a last-dim slice (atol 1e-3)."""
        lowerCAmelCase__ : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
        lowerCAmelCase__ : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
        # The dog is cute and lives in the garden house
        lowerCAmelCase__ : str = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
        lowerCAmelCase__ : Dict = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,__UpperCAmelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCAmelCase ,atol=1E-3 ) )
    @slow
    def UpperCAmelCase_ ( self ) -> int:
        """xlm-roberta-large: same check with shape (1, 12, 1024)."""
        lowerCAmelCase__ : Union[str, Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
        lowerCAmelCase__ : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
        # The dog is cute and lives in the garden house
        lowerCAmelCase__ : Dict = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
        lowerCAmelCase__ : Union[str, Any] = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,__UpperCAmelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCAmelCase ,atol=1E-3 ) )
| 37 | 0 |
'''simple docstring'''
from manim import *
class _UpperCAmelCase ( lowerCAmelCase_ ):
    """Manim scene animating model weights being loaded into CPU memory next to a GPU.

    NOTE(review): machine-mangled — locals are all bound to ``__lowerCAmelCase``
    while later reads use the lost names (``cpu``, ``gpu``, ``model``, ...), and
    positional arguments were replaced with the undefined
    ``__SCREAMING_SNAKE_CASE`` (evidently manim direction constants and the
    mobjects built above); restore before rendering.
    """
    def lowerCamelCase__ ( self ):
        """Build the CPU/GPU/model rectangles, then animate weights moving into CPU slots."""
        __lowerCAmelCase = Rectangle(height=0.5,width=0.5 )
        __lowerCAmelCase = Rectangle(height=0.46,width=0.46 ).set_stroke(width=0 )
        __lowerCAmelCase = [mem.copy() for i in range(6 )]
        __lowerCAmelCase = [mem.copy() for i in range(6 )]
        __lowerCAmelCase = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE,buff=0 )
        __lowerCAmelCase = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE,buff=0 )
        __lowerCAmelCase = VGroup(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE,buff=0 )
        __lowerCAmelCase = Text("""CPU""",font_size=24 )
        __lowerCAmelCase = Group(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE,buff=0.5,aligned_edge=__SCREAMING_SNAKE_CASE )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__SCREAMING_SNAKE_CASE )
        __lowerCAmelCase = [mem.copy() for i in range(1 )]
        __lowerCAmelCase = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE,buff=0 )
        __lowerCAmelCase = Text("""GPU""",font_size=24 )
        __lowerCAmelCase = Group(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE,buff=0.5,aligned_edge=__SCREAMING_SNAKE_CASE )
        gpu.align_to(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(__SCREAMING_SNAKE_CASE )
        __lowerCAmelCase = [mem.copy() for i in range(6 )]
        __lowerCAmelCase = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE,buff=0 )
        __lowerCAmelCase = Text("""Model""",font_size=24 )
        __lowerCAmelCase = Group(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE,buff=0.5,aligned_edge=__SCREAMING_SNAKE_CASE )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(__SCREAMING_SNAKE_CASE,run_time=1 ),Create(__SCREAMING_SNAKE_CASE,run_time=1 ),Create(__SCREAMING_SNAKE_CASE,run_time=1 ),)
        __lowerCAmelCase = MarkupText(
            f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.',font_size=24,)
        __lowerCAmelCase = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __lowerCAmelCase = MarkupText(
            f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model',font_size=18,)
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__SCREAMING_SNAKE_CASE,run_time=2.5 ),Write(__SCREAMING_SNAKE_CASE ),Write(__SCREAMING_SNAKE_CASE ) )
        self.add(__SCREAMING_SNAKE_CASE )
        __lowerCAmelCase = []
        __lowerCAmelCase = []
        __lowerCAmelCase = []
        # Move one highlighted rectangle per model block into the CPU column.
        for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
            __lowerCAmelCase = Rectangle(height=0.46,width=0.46 ).set_stroke(width=0.0 ).set_fill(__SCREAMING_SNAKE_CASE,opacity=0.7 )
            cpu_target.move_to(__SCREAMING_SNAKE_CASE )
            cpu_target.generate_target()
            __lowerCAmelCase = 0.46 / 4
            __lowerCAmelCase = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ),buff=0.02,direction=__SCREAMING_SNAKE_CASE )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target,direction=__SCREAMING_SNAKE_CASE,buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target,direction=__SCREAMING_SNAKE_CASE,buff=0.0 )
            cpu_targs.append(__SCREAMING_SNAKE_CASE )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__SCREAMING_SNAKE_CASE ) )
            second_animations.append(MoveToTarget(__SCREAMING_SNAKE_CASE,run_time=1.5 ) )
        self.play(*__SCREAMING_SNAKE_CASE )
        self.play(*__SCREAMING_SNAKE_CASE )
        self.wait()
| 46 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
# Backwards-compatibility shim: keeps the old `generation_tf_utils` import path
# alive while warning users to import TFGenerationMixin from `transformers`.
# NOTE(review): the base class ``lowerCAmelCase_`` is the mangled name of the
# imported ``TFGenerationMixin``.
class _UpperCAmelCase ( lowerCAmelCase_ ):
    # warning at import time
    warnings.warn(
        """Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
        """be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , lowerCAmelCase_ , )
| 46 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def __lowercase ( model_name ) -> SwinConfig:
    """Build the SwinConfig for a 192x192 SimMIM checkpoint.

    Restores the machine-mangled original: the parameter and the locals
    (``window_size``, ``embed_dim``, ``depths``, ``num_heads``) had been
    replaced with placeholder names, leaving every read a NameError.

    Args:
        model_name: checkpoint name containing "base" or "large".

    Returns:
        A ``SwinConfig`` with the variant's architecture hyper-parameters set.

    Raises:
        ValueError: if the name matches neither supported variant.
    """
    config = SwinConfig(image_size=1_92 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("""Model not supported, only supports base and large variants""" )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
if "encoder.mask_token" in name:
SCREAMING_SNAKE_CASE = name.replace("""encoder.mask_token""" , """embeddings.mask_token""" )
if "encoder.patch_embed.proj" in name:
SCREAMING_SNAKE_CASE = name.replace("""encoder.patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "encoder.patch_embed.norm" in name:
SCREAMING_SNAKE_CASE = name.replace("""encoder.patch_embed.norm""" , """embeddings.norm""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
SCREAMING_SNAKE_CASE = """layernorm.weight"""
if name == "encoder.norm.bias":
SCREAMING_SNAKE_CASE = """layernorm.bias"""
if "decoder" in name:
pass
else:
SCREAMING_SNAKE_CASE = """swin.""" + name
return name
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    """Rewrite a SimMIM state dict into HF layout, splitting fused qkv weights.

    NOTE(review): machine-mangled beyond repair in place — both parameters share
    one name (SyntaxError), the body reads the lost names (``orig_state_dict``,
    ``key``, ``val``, ``model``, ...), and every dict-entry assignment target was
    collapsed to ``SCREAMING_SNAKE_CASE``, so the renamed query/key/value slices
    are computed but never stored. Recover the original from the upstream
    conversion script before use.
    """
    for key in orig_state_dict.copy().keys():
        SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            SCREAMING_SNAKE_CASE = key.split(""".""" )
            SCREAMING_SNAKE_CASE = int(key_split[2] )
            SCREAMING_SNAKE_CASE = int(key_split[4] )
            SCREAMING_SNAKE_CASE = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # q / k / v weight rows, in that order, from the fused matrix.
                SCREAMING_SNAKE_CASE = val[:dim, :]
                SCREAMING_SNAKE_CASE = val[
                    dim : dim * 2, :
                ]
                SCREAMING_SNAKE_CASE = val[-dim:, :]
            else:
                # q / k / v bias segments.
                SCREAMING_SNAKE_CASE = val[
                    :dim
                ]
                SCREAMING_SNAKE_CASE = val[
                    dim : dim * 2
                ]
                SCREAMING_SNAKE_CASE = val[
                    -dim:
                ]
        else:
            SCREAMING_SNAKE_CASE = val
    return orig_state_dict
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
    """Convert a SimMIM .pth checkpoint into a SwinForMaskedImageModeling model,
    smoke-test it on a COCO image, then optionally save and push to the Hub.

    NOTE(review): machine-mangled — all four parameters share one name
    (SyntaxError) and the body calls ``get_swin_config``/``convert_state_dict``,
    which are not defined in this file (the helpers above were renamed to
    ``__lowercase``); restore the original names before running.
    """
    SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""]
    SCREAMING_SNAKE_CASE = get_swin_config(_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE = SwinForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
    model.eval()
    SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    model.load_state_dict(_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    SCREAMING_SNAKE_CASE = ViTImageProcessor(size={"""height""": 1_92, """width""": 1_92} )
    SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
    SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
    with torch.no_grad():
        SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).logits
    print(outputs.keys() )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(_SCREAMING_SNAKE_CASE )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
    if push_to_hub:
        print(F"""Pushing model and image processor for {model_name} to hub""" )
        model.push_to_hub(F"""microsoft/{model_name}""" )
        image_processor.push_to_hub(F"""microsoft/{model_name}""" )
# CLI entry point: parse arguments and run the SimMIM checkpoint conversion.
# NOTE(review): ``convert_swin_checkpoint`` is undefined in this file (the
# function above was renamed to ``__lowercase`` by the mangling).
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""swin-base-simmim-window6-192""",
        type=str,
        choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
        help="""Name of the Swin SimMIM model you'd like to convert.""",
    )
    parser.add_argument(
        """--checkpoint_path""",
        default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
        type=str,
        help="""Path to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    SCREAMING_SNAKE_CASE_ = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 296 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    """Derive a SwinConfig from a timm checkpoint name like
    ``swin_tiny_patch4_window7_224`` (variant, window size, image size),
    attaching the ImageNet label maps fetched from the Hub.

    NOTE(review): machine-mangled — every local binds to ``SCREAMING_SNAKE_CASE``
    while later lines read the lost names (``name_split``, ``model_size``,
    ``idalabel``, ...), and the final assignments lost their ``config.*``
    targets; recover the original from the upstream conversion script.
    """
    SCREAMING_SNAKE_CASE = SwinConfig()
    SCREAMING_SNAKE_CASE = swin_name.split("""_""" )
    SCREAMING_SNAKE_CASE = name_split[1]
    SCREAMING_SNAKE_CASE = int(name_split[4] )
    SCREAMING_SNAKE_CASE = int(name_split[3][-1] )
    if model_size == "tiny":
        SCREAMING_SNAKE_CASE = 96
        SCREAMING_SNAKE_CASE = (2, 2, 6, 2)
        SCREAMING_SNAKE_CASE = (3, 6, 12, 24)
    elif model_size == "small":
        SCREAMING_SNAKE_CASE = 96
        SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
        SCREAMING_SNAKE_CASE = (3, 6, 12, 24)
    elif model_size == "base":
        SCREAMING_SNAKE_CASE = 1_28
        SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
        SCREAMING_SNAKE_CASE = (4, 8, 16, 32)
    else:
        SCREAMING_SNAKE_CASE = 1_92
        SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
        SCREAMING_SNAKE_CASE = (6, 12, 24, 48)
    # 21841 classes for ImageNet-22k checkpoints, 1000 otherwise.
    if "in22k" in swin_name:
        SCREAMING_SNAKE_CASE = 2_18_41
    else:
        SCREAMING_SNAKE_CASE = 10_00
    SCREAMING_SNAKE_CASE = """huggingface/label-files"""
    SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
    SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
    SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
    SCREAMING_SNAKE_CASE = idalabel
    SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
    SCREAMING_SNAKE_CASE = img_size
    SCREAMING_SNAKE_CASE = num_classes
    SCREAMING_SNAKE_CASE = embed_dim
    SCREAMING_SNAKE_CASE = depths
    SCREAMING_SNAKE_CASE = num_heads
    SCREAMING_SNAKE_CASE = window_size
    return config
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
SCREAMING_SNAKE_CASE = """encoder.""" + name
if "attn.proj" in name:
SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "norm.weight":
SCREAMING_SNAKE_CASE = """layernorm.weight"""
if name == "norm.bias":
SCREAMING_SNAKE_CASE = """layernorm.bias"""
if "head" in name:
SCREAMING_SNAKE_CASE = name.replace("""head""" , """classifier""" )
else:
SCREAMING_SNAKE_CASE = """swin.""" + name
return name
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
    """Rewrite a timm Swin state dict into HF layout, splitting fused qkv weights.

    NOTE(review): machine-mangled beyond repair in place — both parameters share
    one name (SyntaxError), the body reads the lost names (``orig_state_dict``,
    ``key``, ``val``, ``model``, ...), and every dict-entry assignment target was
    collapsed to ``SCREAMING_SNAKE_CASE``, so the renamed query/key/value slices
    are computed but never stored. Recover the original from the upstream
    conversion script before use.
    """
    for key in orig_state_dict.copy().keys():
        SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
        if "mask" in key:
            continue
        elif "qkv" in key:
            SCREAMING_SNAKE_CASE = key.split(""".""" )
            SCREAMING_SNAKE_CASE = int(key_split[1] )
            SCREAMING_SNAKE_CASE = int(key_split[3] )
            SCREAMING_SNAKE_CASE = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # q / k / v weight rows, in that order, from the fused matrix.
                SCREAMING_SNAKE_CASE = val[:dim, :]
                SCREAMING_SNAKE_CASE = val[
                    dim : dim * 2, :
                ]
                SCREAMING_SNAKE_CASE = val[-dim:, :]
            else:
                # q / k / v bias segments.
                SCREAMING_SNAKE_CASE = val[
                    :dim
                ]
                SCREAMING_SNAKE_CASE = val[
                    dim : dim * 2
                ]
                SCREAMING_SNAKE_CASE = val[
                    -dim:
                ]
        else:
            SCREAMING_SNAKE_CASE = val
    return orig_state_dict
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
    """Convert a timm Swin model into SwinForImageClassification, verify the two
    produce matching logits on a COCO image (atol 1e-3), and save the result.

    NOTE(review): machine-mangled — both parameters share one name (SyntaxError)
    and the body calls ``get_swin_config``/``convert_state_dict``, which are not
    defined in this file (the helpers above were renamed to ``__lowercase``);
    restore the original names before running.
    """
    SCREAMING_SNAKE_CASE = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
    timm_model.eval()
    SCREAMING_SNAKE_CASE = get_swin_config(_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE = SwinForImageClassification(_SCREAMING_SNAKE_CASE )
    model.eval()
    SCREAMING_SNAKE_CASE = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
    model.load_state_dict(_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
    SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
    SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
    SCREAMING_SNAKE_CASE = timm_model(inputs["""pixel_values"""] )
    SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).logits
    assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
    print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(_SCREAMING_SNAKE_CASE )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
# CLI entry point: parse arguments and run the timm Swin conversion.
# NOTE(review): ``convert_swin_checkpoint`` is undefined in this file (the
# function above was renamed to ``__lowercase`` by the mangling).
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--swin_name""",
        default="""swin_tiny_patch4_window7_224""",
        type=str,
        help="""Name of the Swin timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    SCREAMING_SNAKE_CASE_ = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 296 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
# NOTE(review): machine-mangled — ``frozen=__lowercase`` and the base class
# ``__lowercase`` are placeholder names (the base is evidently the imported
# ``TaskTemplate``); restore before use.
@dataclass(frozen=__lowercase )
class UpperCAmelCase_ ( __lowercase ):
    """Extractive question-answering task template: declares the expected
    question/context input features and the answers sequence (text spans with
    their character start offsets)."""
    # Task identifier, kept in ``asdict`` output even though it is the default.
    UpperCAmelCase__ : str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
    # Dataset features the task consumes.
    UpperCAmelCase__ : ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
    # Dataset features the task produces/targets.
    UpperCAmelCase__ : ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string" ),
                    "answer_start": Value("int32" ),
                } )
        } )
    # Default column names for this task (mangled attribute names; the property
    # below reads question_column/context_column/answers_column).
    UpperCAmelCase__ : str = "question"
    UpperCAmelCase__ : str = "context"
    UpperCAmelCase__ : str = "answers"
    @property
    def __lowercase ( self ) -> Dict[str, str]:
        """Map the configured dataset column names onto the canonical task names."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 15 |
import numpy as np
def __UpperCAmelCase(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Power iteration: approximate the dominant eigenpair of ``input_matrix``.

    Fixes: the original signature repeated the parameter name ``__a`` four
    times (a SyntaxError), and every local was bound to ``_a`` while the code
    read ``is_complex``/``convergence``/``iterations``/... (NameError).

    Returns (largest eigenvalue, corresponding unit eigenvector).
    """
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed
    # max_iterations or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence via relative change of the eigenvalue estimate.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        # Hermitian matrices have real eigenvalues; drop numerical imag noise.
        lambda_ = np.real(lambda_)
    return lambda_, vector
def __UpperCAmelCase() -> None:
    """Self-test: compare power iteration against ``np.linalg.eigh``.

    Fixes: ``np.complexaaa`` does not exist (mangled ``np.complex128``) and
    locals were bound to ``_a`` while read under their real names.
    NOTE(review): this def reuses the same mangled name as the function above
    (shadowing it), and calls ``power_iteration``, which is not defined under
    that name in this file — confirm the intended bindings.
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    # Build a Hermitian complex matrix from the real one.
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
    # Run doctests, then the numerical self-test.
    import doctest
    doctest.testmod()
    # NOTE(review): `test_power_iteration` is not defined under this name in
    # this file (the self-test above carries a mangled identifier), so this
    # call raises NameError as written — confirm the intended callee.
    test_power_iteration()
| 15 | 1 |
def UpperCAmelCase__(lowerCamelCase):
    """Convert an Excel-style column title (e.g. ``"AB"``) to its column number.

    Fix: every local was bound to the placeholder ``lowercase`` while the code
    read ``answer``/``index``/``power``/``value`` — a guaranteed NameError.

    Raises AssertionError for non-uppercase input (including the empty string,
    since ``"".isupper()`` is False).
    """
    assert lowerCamelCase.isupper()
    answer = 0
    index = len(lowerCamelCase) - 1
    power = 0
    while index >= 0:
        # Each letter is a base-26 digit: A=1 ... Z=26 (ord("A") == 65).
        value = (ord(lowerCamelCase[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 236 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
# NOTE(review): every binding below reuses the single mangled name
# `_UpperCAmelCase`, so each assignment silently overwrites the previous one
# and none of these values is reachable under a meaningful name. They look
# like the conventional Whisper module constants (module logger, pretrained
# config archive map, and two suppressed-token id lists) — confirm the
# intended names before relying on any of them.
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL.
_UpperCAmelCase : Dict = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
# Token ids suppressed during generation — presumably the monolingual
# non-speech-token list; verify against the tokenizer.
_UpperCAmelCase : Union[str, Any] = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
# Presumably the multilingual variant of the same list — verify.
_UpperCAmelCase : List[Any] = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class __lowerCAmelCase ( lowerCAmelCase):
    """Configuration class for a Whisper speech-to-text model.

    Fixes: the original ``__init__`` named every parameter ``_lowerCAmelCase``
    (duplicate parameter names are a SyntaxError) and bound every attribute to
    a local placeholder instead of ``self``, so the config stored nothing.
    Parameter names/defaults are restored from the documented WhisperConfig
    signature (defaults match the mangled source one-to-one).
    """

    model_type = '''whisper'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class __lowerCAmelCase ( lowerCAmelCase):
    """ONNX export configuration for Whisper.

    Fixes: local results were bound to a placeholder while the code read
    ``common_inputs``/``dummy_inputs``/``encoder_inputs``/``decoder_inputs``
    (NameError), and ``generate_dummy_inputs`` repeated one parameter name
    (SyntaxError).  NOTE(review): the property/method names ``inputs`` and
    ``atol_for_validation`` are restored from the OnnxConfig convention —
    ``generate_dummy_inputs`` is grounded by the ``super()`` call itself.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Encoder input: log-mel features, axes (batch, feature_size, encoder_sequence).
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            # With cached keys/values the decoder consumes one token at a time.
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def SCREAMING_SNAKE_CASE_generate_dummy_inputs_removed(self):
        pass

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        # Audio side: synthesize dummy features via the feature extractor.
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # The encoder downsamples the sequence by 2.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        # Whisper export tolerates slightly looser numerical agreement.
        return 1e-3
| 236 | 1 |
"""simple docstring"""
import os
import sys
import unittest
_lowerCAmelCase : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_lowerCAmelCase : List[Any] = os.path.join(git_repo_path, '''src''', '''diffusers''')
class A_ ( unittest.TestCase ):
    """Tests for the `check_dummies` repo-consistency utility.

    Fixes: all four methods shared one mangled name (so only the last was
    collected by unittest) and every intermediate result was bound to a
    placeholder while the assertions read it under its real name.
    """

    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
"""simple docstring"""
from collections import defaultdict
def lowerCamelCase_( _lowerCamelCase ) -> int:
    """DFS over the module-level ``tree``; returns the size of the subtree
    rooted at node ``_lowerCamelCase`` and records even-sized subtrees in
    ``cuts`` (an edge above an even subtree can be removed in an even-tree).

    Fixes: the original rebound the parameter to ``1`` and ``True`` instead of
    initialising ``ret``/marking ``visited``, and recursed via the undefined
    name ``dfs`` (this function's own mangled name).
    Relies on module-level ``tree`` (adjacency lists), ``visited`` and ``cuts``.
    """
    ret = 1
    visited[_lowerCamelCase] = True
    for v in tree[_lowerCamelCase]:
        if v not in visited:
            # The tree is undirected: skip the parent, recurse into children.
            ret += lowerCamelCase_(v)
    if ret % 2 == 0:
        # Even-sized subtree: cutting its parent edge keeps both halves even.
        cuts.append(_lowerCamelCase)
    return ret
def lowerCamelCase_( ) -> int:
    """Kick off the even-tree DFS from root node 1.

    NOTE(review): ``dfs`` is not defined under that name in this file (the
    DFS above carries a mangled identifier), so this call raises NameError as
    written; this def also reuses the same name as the DFS function, shadowing
    it. Confirm the intended bindings.
    """
    dfs(1 )
if __name__ == "__main__":
    # Build the sample 10-node tree and report the maximum number of removable
    # edges. Fixes: every binding used a mangled placeholder name while the
    # code below (and the DFS above) reads `tree`/`visited`/`cuts`/`edges`,
    # and dataset residue ("| 340 | 1 |") was fused onto the last line,
    # making the file a SyntaxError.
    n_nodes, n_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    # NOTE(review): `even_tree` is not defined under this name here (the
    # wrapper above carries a mangled identifier) — confirm intended callee.
    even_tree()
    print(len(cuts) - 1)
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
# TensorRT execution-provider toggles, read by onnxruntime at session creation.
# NOTE(review): restored from mangled plain assignments ("1"/"0"/"1") — these
# are the conventional TensorRT INT8 environment switches (`os` is imported but
# otherwise unused, which supports this reading); confirm against deployment.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
# Disable ORT graph optimizations so the provider sees the raw graph.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

# Benchmark shape: one sequence of 128 tokens.
sequence = 128
batch = 1
# Fix: `np.intaa` does not exist (mangled `np.int64`).
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 136 |
"""simple docstring"""
from __future__ import annotations
import math
def _SCREAMING_SNAKE_CASE(a, b) -> list:
    """Multiply two 2x2 matrices directly (Strassen base case).

    Fix: the original repeated the parameter name (SyntaxError) while the body
    read ``a``/``b``, and the result was bound to a placeholder instead of
    ``new_matrix``.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("""Matrices are not 2x2""")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def _SCREAMING_SNAKE_CASE(matrix_a, matrix_b) -> list:
    """Element-wise sum of two equally-shaped matrices.

    Fix: duplicate parameter names (SyntaxError) while the body read
    ``matrix_a``/``matrix_b``.
    """
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def _SCREAMING_SNAKE_CASE(matrix_a, matrix_b) -> list:
    """Element-wise difference of two equally-shaped matrices.

    Fix: duplicate parameter names (SyntaxError) while the body read
    ``matrix_a``/``matrix_b``.
    """
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def _SCREAMING_SNAKE_CASE(a) -> tuple[list, list, list, list]:
    """Split an even-dimensioned square matrix into its four quadrants.

    Returns (top_left, top_right, bot_left, bot_right).
    Fix: locals were bound to a placeholder while the comprehensions read the
    real names (``matrix_length``, ``mid``, the quadrant lists).
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def _SCREAMING_SNAKE_CASE(matrix) -> tuple[int, int]:
    """Return (rows, cols) of ``matrix``.

    Fix: the body read ``matrix`` while the parameter carried a mangled name.
    """
    return len(matrix), len(matrix[0])
def _SCREAMING_SNAKE_CASE(matrix) -> None:
    """Print the matrix one row per line.

    Fix: the generator stringified the (mangled) parameter instead of each
    ``line``, and the body read ``matrix`` while the parameter was mangled.
    """
    print("""\n""".join(str(line) for line in matrix))
def _SCREAMING_SNAKE_CASE(matrix_a, matrix_b) -> list:
    """Recursive Strassen multiplication of two square power-of-two matrices.

    Uses the seven Strassen products t1..t7 and recombines them into the four
    result quadrants. Fixes: duplicate parameter names (SyntaxError) and
    intermediate results bound to a placeholder while read under real names.
    NOTE(review): the helpers called here (matrix_dimensions, split_matrix,
    matrix_addition, matrix_subtraction, default_matrix_multiplication,
    actual_strassen) are this file's functions under mangled names — confirm
    the intended bindings.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t5, t1), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def _SCREAMING_SNAKE_CASE(matrix1, matrix2) -> list:
    """Multiply two matrices via Strassen, padding to a power-of-two size.

    Fixes: duplicate parameter names (SyntaxError), locals bound to a
    placeholder while read under real names, and ``math.loga`` (nonexistent,
    mangled ``math.log2``).  NOTE(review): the sibling helpers are this file's
    functions under mangled names — confirm the intended bindings.
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            """Unable to multiply these matrices, please check the dimensions.\n"""
            F'''Matrix A: {matrix1}\n'''
            F'''Matrix B: {matrix2}'''
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension2[1] and dimension2[0] == dimension1[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    # Next power of two >= the largest dimension, so the matrices split evenly.
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # NOTE(review): both operand matrices are bound to the same mangled name
    # `UpperCAmelCase`, so the second assignment overwrites the first, and the
    # print reads `matrixa`/`strassen`, neither of which is defined under those
    # names in this file — this block raises NameError as written. Confirm the
    # intended bindings (two distinct matrices multiplied via the Strassen
    # entry point above).
    UpperCAmelCase : List[Any] = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    UpperCAmelCase : Optional[int] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixa))
def UpperCamelCase(__lowerCamelCase: int = 10**12):
    """Smallest blue-disc count whose box exceeds ``__lowerCamelCase`` discs
    while P(two blue) == 1/2 (Project Euler 100-style Pell recurrence).

    Fix: the four state variables were all bound to the placeholder
    ``snake_case`` while the loop read ``prev_numerator``/``numerator``/
    ``prev_denominator``/``denominator``, and the threshold was read as
    ``min_total`` although the parameter carried a mangled name.
    """
    min_total = __lowerCamelCase
    # (numerator + 1) / 2 is the total disc count, (denominator + 1) / 2 the
    # blue-disc count; successive exact solutions follow this recurrence.
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under this name in this file
    # (the solver above carries a mangled identifier), so this raises
    # NameError as written — confirm the intended callee.
    print(F'{solution() = }')
| 10 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCamelCase = ["""text""", """image""", """audio"""]
def UpperCamelCase(__lowerCamelCase):
    """Build one concrete sample input per declared input type.

    ``__lowerCamelCase`` is a list whose items are "text", "image", "audio",
    or a nested list of those (handled recursively).  Fixes: the accumulator
    and loop variable were bound to placeholders while read as ``inputs``/
    ``input_type``, and the nested-spec branch tested ``isinstance(x, x)``.
    NOTE(review): the nested branch is restored as ``isinstance(input_type,
    list)`` with a recursive call to this function — confirm intent.
    """
    input_types = __lowerCamelCase
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(UpperCamelCase(input_type))
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""")
    return inputs
def UpperCamelCase(__lowerCamelCase):
    """Classify each element of ``__lowerCamelCase`` as "text", "image" or "audio".

    Fixes: the accumulator/loop variable were bound to placeholders while read
    as ``output_types``/``output``, and each ``isinstance`` tested the mangled
    parameter instead of the current element.
    """
    outputs = __lowerCamelCase
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"""Invalid output: {output}""")
    return output_types
@is_tool_test
class UpperCAmelCase :
    """Mixin exercising the ``self.tool`` supplied by the concrete test case.

    Fixes: all five methods shared one mangled name (only the last survived)
    and locals were bound to placeholders while read under real names.
    NOTE(review): ``authorized_types`` is presumably the module-level list of
    agent type names (bound above under a mangled identifier) and
    ``create_inputs``/``output_types`` are this file's helpers under mangled
    names — confirm the intended bindings.
    """

    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                # Nested specs: every member type must be authorized.
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 10 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
A_ : List[str] = None
A_ : Any = logging.get_logger(__name__)
A_ : Any = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
A_ : Optional[Any] = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
A_ : Optional[int] = {
"google/rembert": 256,
}
A_ : Dict = "▁"
class lowerCamelCase (PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer (sentencepiece model + tokenizers-JSON backend).

    Fixes: the base class was the undefined mangled name ``A__`` (restored to
    ``PreTrainedTokenizerFast``, imported above), all four class attributes
    shared one mangled name, and ``__init__``/the methods repeated a single
    mangled parameter name (duplicate parameters are a SyntaxError).
    NOTE(review): parameter/method names restored per the tokenizer-fast
    convention — confirm against callers.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Converting back to a slow tokenizer needs the sentencepiece model.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP].
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        # 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Segment ids: 0 for the first sequence (incl. specials), 1 for the second.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 165 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def A(snake_case__):
    """Split a sklearn-style dataset mapping into (features, targets).

    Fix: the body read the undefined name ``data`` instead of the (mangled)
    parameter.
    """
    data = snake_case__
    return (data["data"], data["target"])
def A(features, target):
    """Fit an XGBoost classifier on the given training data and return it.

    Fix: the original repeated the parameter name ``snake_case__`` twice
    (duplicate parameters are a SyntaxError) and bound the classifier to a
    placeholder while calling ``classifier.fit``.
    """
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def A():
    """Train an XGBoost classifier on IRIS and plot its confusion matrix.

    Fixes: every local was bound to a placeholder while read under its real
    name. NOTE(review): ``data_handling`` and ``xgboost`` are this file's
    helpers under mangled names — confirm the intended bindings.
    """
    data = load_iris()
    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = data["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgb_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgb_classifier, x_test, y_test, display_labels=names, cmap="""Blues""", normalize="""true""",
    )
    plt.title("""Normalized Confusion Matrix - IRIS Dataset""")
    plt.show()
if __name__ == "__main__":
    # Run doctests verbosely, then train and plot.
    import doctest
    doctest.testmod(verbose=True)
    # NOTE(review): `main` is not defined under this name in this file (the
    # driver above carries a mangled identifier), so this raises NameError as
    # written — confirm the intended callee.
    main()
| 165 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
# Demo fixtures for the Banker's algorithm class below.
# NOTE(review): all three literals are bound to the SAME name `__A`, so only
# the last assignment survives — these were presumably distinct constants
# (claim vector, allocated-resources table, maximum-claim table) before an
# automated rename; verify and restore distinct names.
__A = [8, 5, 9, 7]  # total units of each of the 4 resource types
__A = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]  # per-process currently allocated resources
__A = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]  # per-process maximum claims
class snake_case:
    """Banker's algorithm: simulate resource allocation and decide whether the
    system stays in a safe state.

    Fixes restored from the defective original: ``__init__`` bound plain
    locals instead of the ``self.__*`` attributes every method reads; all
    five methods shared the single name ``lowercase_`` (each shadowing the
    previous) while bodies call ``self.__need()`` etc., so the private
    method names were restored to match those calls; and several undefined
    names (``UpperCamelCase__``) inside comprehensions/joins were replaced
    with the intended loop variables.
    """

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        # Total units of each resource type in the system.
        self.__claim_vector = claim_vector
        # Per-process resources currently held.
        self.__allocated_resources_table = allocated_resources_table
        # Per-process maximum resources that may ever be requested.
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Column-wise sum of currently allocated resources."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process index to its need vector (used to recover the
        original process number after items are removed from the work list)."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety algorithm, printing each executable process until
        either all processes finish (safe) or none can proceed (unsafe)."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    # A process can run only if its whole need fits in what's free.
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation and maximum-claim tables plus resource totals."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest

    doctest.testmod()
| 108 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def a__(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve X_L = 2*pi*f*L for whichever of the three quantities is 0.

    Fix: the original declared the same parameter name three times (a
    SyntaxError) while the body read ``inductance``/``frequency``/
    ``reactance``; restored those parameter names.

    :param inductance: inductance L in henries (0 to solve for it)
    :param frequency: frequency f in hertz (0 to solve for it)
    :param reactance: inductive reactance X_L in ohms (0 to solve for it)
    :return: one-entry dict naming the solved quantity
    :raises ValueError: if not exactly one argument is 0, or any is negative
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest

    doctest.testmod()
| 108 | 1 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Any:
'''simple docstring'''
if isinstance(__lowercase , torch.Tensor ):
return image
elif isinstance(__lowercase , PIL.Image.Image ):
_A = [image]
if isinstance(image[0] , PIL.Image.Image ):
_A = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
_A = np.concatenate(__lowercase , axis=0 )
_A = np.array(__lowercase ).astype(np.floataa ) / 255.0
_A = image.transpose(0 , 3 , 1 , 2 )
_A = 2.0 * image - 1.0
_A = torch.from_numpy(__lowercase )
elif isinstance(image[0] , torch.Tensor ):
_A = torch.cat(__lowercase , dim=0 )
return image
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=0.9995 ) -> Any:
'''simple docstring'''
if not isinstance(__lowercase , np.ndarray ):
_A = True
_A = va.device
_A = va.cpu().numpy()
_A = va.cpu().numpy()
_A = np.sum(va * va / (np.linalg.norm(__lowercase ) * np.linalg.norm(__lowercase )) )
if np.abs(__lowercase ) > DOT_THRESHOLD:
_A = (1 - t) * va + t * va
else:
_A = np.arccos(__lowercase )
_A = np.sin(__lowercase )
_A = theta_a * t
_A = np.sin(__lowercase )
_A = np.sin(theta_a - theta_t ) / sin_theta_a
_A = sin_theta_t / sin_theta_a
_A = sa * va + sa * va
if inputs_are_torch:
_A = torch.from_numpy(__lowercase ).to(__lowercase )
return va
def __lowercase ( __lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
_A = F.normalize(__lowercase , dim=-1 )
_A = F.normalize(__lowercase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def __lowercase ( __lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
for param in model.parameters():
_A = value
class _UpperCAmelCase ( snake_case_ ):
    """CLIP-guided image-mixing Stable Diffusion pipeline.

    Mixes a content image and a style image in latent space (via ``slerp``)
    and denoises with optional CLIP guidance; a CoCa captioner can generate
    prompts when none are supplied.

    NOTE(review): an automated rename damaged this class — most method
    signatures repeat the parameter name ``__UpperCAmelCase`` (a SyntaxError
    in Python), and every local/attribute assignment binds the same
    throwaway name ``_A`` (e.g. ``__init__`` never actually sets the
    ``self.feature_extractor_size`` / ``self.normalize`` attributes read
    later). The original names must be restored before this class can run;
    comments below describe the evident intent only.
    """

    def __init__( self : int , __UpperCAmelCase : AutoencoderKL , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : UNetaDConditionModel , __UpperCAmelCase : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __UpperCAmelCase : CLIPFeatureExtractor , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Tuple=None , ):
        """Register all sub-models and freeze the text encoder and CLIP model."""
        super().__init__()
        self.register_modules(
            vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , clip_model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , coca_model=__UpperCAmelCase , coca_tokenizer=__UpperCAmelCase , coca_transform=__UpperCAmelCase , )
        _A = (
            feature_extractor.size
            if isinstance(feature_extractor.size , __UpperCAmelCase )
            else feature_extractor.size["shortest_edge"]
        )
        _A = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , __UpperCAmelCase )
        set_requires_grad(self.clip_model , __UpperCAmelCase )

    def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Union[str, int]] = "auto" ):
        """Enable sliced attention to trade speed for lower memory use."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            _A = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__UpperCAmelCase )

    def lowerCAmelCase ( self : List[Any] ):
        """Disable attention slicing (presumably by passing None — the rename
        obscured the argument; verify against the upstream pipeline)."""
        self.enable_attention_slicing(__UpperCAmelCase )

    def lowerCAmelCase ( self : int ):
        """Freeze the VAE's parameters."""
        set_requires_grad(self.vae , __UpperCAmelCase )

    def lowerCAmelCase ( self : int ):
        """Unfreeze the VAE's parameters."""
        set_requires_grad(self.vae , __UpperCAmelCase )

    def lowerCAmelCase ( self : Optional[int] ):
        """Freeze the UNet's parameters."""
        set_requires_grad(self.unet , __UpperCAmelCase )

    def lowerCAmelCase ( self : int ):
        """Unfreeze the UNet's parameters."""
        set_requires_grad(self.unet , __UpperCAmelCase )

    def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any ):
        """Return the tail of the timestep schedule that an img2img run with
        the given strength should actually execute."""
        # original sample to sample at - strength determines how far back to go
        _A = min(int(num_inference_steps * strength ) , __UpperCAmelCase )
        _A = max(num_inference_steps - init_timestep , 0 )
        _A = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Dict=None ):
        """VAE-encode an image tensor and add scheduler noise for the first
        retained timestep, returning the initial latents."""
        if not isinstance(__UpperCAmelCase , torch.Tensor ):
            raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(__UpperCAmelCase )}''' )
        _A = image.to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            # Per-sample generators: encode each image slice with its own RNG.
            _A = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__UpperCAmelCase )
            ]
            _A = torch.cat(__UpperCAmelCase , dim=0 )
        else:
            _A = self.vae.encode(__UpperCAmelCase ).latent_dist.sample(__UpperCAmelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        _A = 0.18215 * init_latents
        _A = init_latents.repeat_interleave(__UpperCAmelCase , dim=0 )
        _A = randn_tensor(init_latents.shape , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase )
        # get latents
        _A = self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        _A = init_latents
        return latents

    def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Dict ):
        """Caption an image with the CoCa model and strip its text markers."""
        _A = self.coca_transform(__UpperCAmelCase ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            _A = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        _A = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )

    def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : int ):
        """Return L2-normalised CLIP image embeddings, repeated per prompt."""
        _A = self.feature_extractor.preprocess(__UpperCAmelCase )
        _A = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
        _A = self.clip_model.get_image_features(__UpperCAmelCase )
        _A = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__UpperCAmelCase )
        _A = image_embeddings_clip.repeat_interleave(__UpperCAmelCase , dim=0 )
        return image_embeddings_clip

    @torch.enable_grad()
    def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , ):
        """One CLIP-guidance step: decode latents, compare CLIP embeddings to
        the target, and nudge the noise prediction along the gradient."""
        _A = latents.detach().requires_grad_()
        _A = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
        # predict the noise residual
        _A = self.unet(__UpperCAmelCase , __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase ).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            _A = self.scheduler.alphas_cumprod[timestep]
            _A = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            _A = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            _A = torch.sqrt(__UpperCAmelCase )
            _A = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , __UpperCAmelCase ):
            _A = self.scheduler.sigmas[index]
            _A = latents - sigma * noise_pred
        else:
            raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        _A = 1 / 0.18215 * sample
        _A = self.vae.decode(__UpperCAmelCase ).sample
        _A = (image / 2 + 0.5).clamp(0 , 1 )
        _A = transforms.Resize(self.feature_extractor_size )(__UpperCAmelCase )
        _A = self.normalize(__UpperCAmelCase ).to(latents.dtype )
        _A = self.clip_model.get_image_features(__UpperCAmelCase )
        _A = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__UpperCAmelCase )
        _A = spherical_dist_loss(__UpperCAmelCase , __UpperCAmelCase ).mean() * clip_guidance_scale
        _A = -torch.autograd.grad(__UpperCAmelCase , __UpperCAmelCase )[0]
        if isinstance(self.scheduler , __UpperCAmelCase ):
            _A = latents.detach() + grads * (sigma**2)
            _A = noise_pred_original
        else:
            _A = noise_pred_original - torch.sqrt(__UpperCAmelCase ) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__( self : Union[str, Any] , __UpperCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , __UpperCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : Optional[int] = 512 , __UpperCAmelCase : Optional[int] = 512 , __UpperCAmelCase : float = 0.6 , __UpperCAmelCase : Optional[int] = 50 , __UpperCAmelCase : Optional[float] = 7.5 , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : Optional[float] = 100 , __UpperCAmelCase : Optional[torch.Generator] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : float = 0.8 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : float = 0.1 , ):
        """Run the pipeline: caption images if prompts are missing, encode and
        slerp-mix the content/style prompts, images and CLIP embeddings, then
        denoise with optional CLIP guidance and decode to images."""
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != batch_size:
            raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(__UpperCAmelCase )} generators.''' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if isinstance(__UpperCAmelCase , torch.Generator ) and batch_size > 1:
            _A = [generator] + [None] * (batch_size - 1)
        _A = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        _A = [x[0] for x in coca_is_none if x[1]]
        _A = ", ".join(__UpperCAmelCase )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(__UpperCAmelCase ):
                raise ValueError(
                    f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            _A = self.get_image_description(__UpperCAmelCase )
        if style_prompt is None:
            if len(__UpperCAmelCase ):
                raise ValueError(
                    f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            _A = self.get_image_description(__UpperCAmelCase )
        # get prompt text embeddings for content and style
        _A = self.tokenizer(
            __UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors="pt" , )
        _A = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        _A = self.tokenizer(
            __UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors="pt" , )
        _A = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        _A = slerp(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        # duplicate text embeddings for each generation per prompt
        _A = text_embeddings.repeat_interleave(__UpperCAmelCase , dim=0 )
        # set timesteps
        _A = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        _A = {}
        if accepts_offset:
            _A = 1
        self.scheduler.set_timesteps(__UpperCAmelCase , **__UpperCAmelCase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device )
        _A , _A = self.get_timesteps(__UpperCAmelCase , __UpperCAmelCase , self.device )
        _A = timesteps[:1].repeat(__UpperCAmelCase )
        # Preprocess image
        _A = preprocess(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        _A = self.prepare_latents(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , text_embeddings.dtype , self.device , __UpperCAmelCase )
        _A = preprocess(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        _A = self.prepare_latents(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , text_embeddings.dtype , self.device , __UpperCAmelCase )
        _A = slerp(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        if clip_guidance_scale > 0:
            _A = self.get_clip_image_embeddings(__UpperCAmelCase , __UpperCAmelCase )
            _A = self.get_clip_image_embeddings(__UpperCAmelCase , __UpperCAmelCase )
            _A = slerp(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        _A = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            _A = content_text_input.input_ids.shape[-1]
            _A = self.tokenizer([""] , padding="max_length" , max_length=__UpperCAmelCase , return_tensors="pt" )
            _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            _A = uncond_embeddings.repeat_interleave(__UpperCAmelCase , dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _A = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        _A = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        _A = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                _A = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device="cpu" , dtype=__UpperCAmelCase ).to(
                    self.device )
            else:
                _A = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            _A = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        _A = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        _A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        _A = {}
        if accepts_eta:
            _A = eta
        # check if the scheduler accepts generator
        _A = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            _A = generator
        with self.progress_bar(total=__UpperCAmelCase ):
            for i, t in enumerate(__UpperCAmelCase ):
                # expand the latents if we are doing classifier free guidance
                _A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                _A = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
                # predict the noise residual
                _A = self.unet(__UpperCAmelCase , __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    _A , _A = noise_pred.chunk(2 )
                    _A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    _A = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    _A , _A = self.cond_fn(
                        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
                # compute the previous noisy sample x_t -> x_t-1
                _A = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        _A = 1 / 0.18215 * latents
        _A = self.vae.decode(__UpperCAmelCase ).sample
        _A = (image / 2 + 0.5).clamp(0 , 1 )
        _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            _A = self.numpy_to_pil(__UpperCAmelCase )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=__UpperCAmelCase , nsfw_content_detected=__UpperCAmelCase )
| 79 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __A :
    """Immutable state container for the Flax DDPM scheduler.

    Fix: all four fields were collapsed into the single name
    ``__lowerCamelCase`` and the factory's parameters all shared the name
    ``A`` (a SyntaxError). Field and method names are restored to match the
    scheduler's own usage below (``state.common``, ``state.replace(
    num_inference_steps=..., timesteps=...)`` and
    ``DDPMSchedulerState.create(common=..., init_noise_sigma=...,
    timesteps=...)``).
    """

    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        """Build a fresh state; ``num_inference_steps`` stays None until
        ``set_timesteps`` is called."""
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class __A ( A ):
    """Scheduler step output that also carries the updated scheduler state.

    Fix: the field had been renamed to ``__lowerCamelCase``; restored
    ``state``, which is the keyword the scheduler's ``step`` method uses
    when constructing this output (``FlaxDDPMSchedulerOutput(prev_sample=...,
    state=...)``).
    """

    state: DDPMSchedulerState
class __A ( A , A ):
    """Flax/JAX implementation of the DDPM denoising scheduler
    (https://arxiv.org/abs/2006.11239).

    NOTE(review): an automated rename damaged this class — the base-class
    list repeats the same name ``A`` (``class C(A, A)`` raises "duplicate
    base class" at class-creation time), several method signatures repeat
    the parameter name ``A`` (a SyntaxError), every method is named ``a__``
    (each definition shadowing the previous), and both class attributes are
    bound to the single name ``__lowerCamelCase``. The original names
    (``_compatibles``/``dtype``, ``has_state``, ``create_state``,
    ``scale_model_input``, ``set_timesteps``, ``_get_variance``, ``step``,
    ``add_noise``, ``get_velocity``) must be restored before use; comments
    below describe the evident intent only.
    """

    __lowerCamelCase : Dict = [e.name for e in FlaxKarrasDiffusionSchedulers]
    __lowerCamelCase : jnp.dtype

    @property
    def a__ (self ) -> List[str]:
        """Flax schedulers carry explicit state objects."""
        return True

    @register_to_config
    def __init__(self , A = 1_000 , A = 0.0001 , A = 0.02 , A = "linear" , A = None , A = "fixed_small" , A = True , A = "epsilon" , A = jnp.floataa , ) -> Union[str, Any]:
        """Store the compute dtype; all other config is handled by
        @register_to_config."""
        _a = dtype

    def a__ (self , A = None ) -> DDPMSchedulerState:
        """Create the initial scheduler state (full training schedule,
        unit initial noise sigma)."""
        if common is None:
            _a = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        _a = jnp.array(1.0 , dtype=self.dtype )
        _a = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=A , init_noise_sigma=A , timesteps=A , )

    def a__ (self , A , A , A = None ) -> jnp.ndarray:
        """DDPM applies no per-step input scaling; return the sample as-is."""
        return sample

    def a__ (self , A , A , A = () ) -> DDPMSchedulerState:
        """Set the inference schedule: evenly strided timesteps, descending."""
        _a = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        _a = (jnp.arange(0 , A ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=A , timesteps=A , )

    def a__ (self , A , A , A=None , A=None ) -> int:
        """Compute the per-timestep posterior variance according to the
        configured variance_type."""
        _a = state.common.alphas_cumprod[t]
        _a = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        _a = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            _a = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            _a = jnp.clip(A , a_min=1E-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            _a = jnp.log(jnp.clip(A , a_min=1E-20 ) )
        elif variance_type == "fixed_large":
            _a = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            _a = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate between min and max log-variance using the model output.
            _a = variance
            _a = state.common.betas[t]
            _a = (predicted_variance + 1) / 2
            _a = frac * max_log + (1 - frac) * min_log
        return variance

    def a__ (self , A , A , A , A , A = None , A = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """One reverse-diffusion step: predict x_0, form the posterior mean,
        and add variance noise (for t > 0)."""
        _a = timestep
        if key is None:
            _a = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            _a , _a = jnp.split(A , sample.shape[1] , axis=1 )
        else:
            _a = None
        # 1. compute alphas, betas
        _a = state.common.alphas_cumprod[t]
        _a = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        _a = 1 - alpha_prod_t
        _a = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            _a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            _a = model_output
        elif self.config.prediction_type == "v_prediction":
            _a = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
                ''' for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            _a = jnp.clip(A , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        _a = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        _a = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        _a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            _a = jax.random.split(A , num=1 )
            _a = jax.random.normal(A , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(A , A , predicted_variance=A ) ** 0.5) * noise
        _a = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        _a = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=A , state=A )

    def a__ (self , A , A , A , A , ) -> jnp.ndarray:
        """Forward-diffuse clean samples to the given timesteps."""
        return add_noise_common(state.common , A , A , A )

    def a__ (self , A , A , A , A , ) -> jnp.ndarray:
        """Compute the v-prediction target for the given samples/noise."""
        return get_velocity_common(state.common , A , A , A )

    def __len__(self ) -> Tuple:
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
| 211 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def snake_case(a, b):
    """Compare two ONNX TensorProtos for equality ignoring their names.

    Fix: the original declared the same parameter name twice (a SyntaxError)
    and assigned the blank names to throwaway locals instead of mutating
    ``a.name`` / ``b.name``; restored the temporary name-blanking so the
    protobuf equality check ignores names, then the originals are restored.

    :return: True if the tensors are identical apart from their names
    """
    name_a = a.name
    name_b = b.name
    # Blank both names so equality compares only the tensor contents.
    a.name = ""
    b.name = ""
    res = a == b
    # Restore the callers' names before returning.
    a.name = name_a
    b.name = name_b
    return res
def snake_case(node_proto, name, new_name):
    """Replace every input of ``node_proto`` equal to ``name`` with
    ``new_name``, recursing into If/Loop subgraphs.

    Fix: the original declared the same parameter name three times (a
    SyntaxError) and passed that one name to insert/recursion calls;
    restored distinct ``name`` / ``new_name`` arguments.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # Repeated-field protos have no item assignment: insert then pop.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def snake_case(graph_proto, name, new_name):
    """Apply the node-level input rename to every node of ``graph_proto``.

    Fix: the original declared the same parameter name three times (a
    SyntaxError); restored distinct names and forwarded them to the
    node-level helper.
    """
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def snake_case(model, model_without_ext, ind_to_replace):
    """Remove duplicated initializers from ``model_without_ext`` and rewire
    all graph inputs that referenced them to the kept duplicate.

    Fix: the original declared the same parameter name three times (a
    SyntaxError) and collapsed ``name_i``/``name_ref`` into one local, so
    the rewiring call received the wrong arguments; restored distinct names.

    :param model: model whose initializers still carry external data
    :param model_without_ext: model to mutate in place
    :param ind_to_replace: (i, ref_i) pairs; initializer i is removed and
                           its uses replaced by initializer ref_i
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def snake_case(onnx_file_path):
    """Deduplicate identical initializer tensors in an ONNX model and save
    the result next to the input as ``optimized_<name>``.

    Fix: the original collapsed all locals into one name, so ``name_i`` /
    ``name_j`` / ``dtype`` were read unbound and the dedup bookkeeping was
    lost; restored distinct locals and the arguments of the final calls.

    :param onnx_file_path: path to the .onnx file to optimize
    :return: path of the optimized model that was written
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    # Pairwise scan: mark j as a duplicate of the first equal tensor i.
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # Element sizes per ONNX TensorProto.DataType:
                # 1=FLOAT, 6=INT32 (4 bytes); 7=INT64, 11=DOUBLE (8 bytes).
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 10_24 / 10_24 / 10_24, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
| 366 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCamelCase_:
    """A polynomial with real coefficients, ``coefficients[i]`` being the
    coefficient of ``x**i``.

    Fixes restored from the defective original: locals named
    ``coefficients`` were never bound (assignments went to a throwaway
    name), ``__str__`` formatted an unbound name instead of the exponent,
    ``integral`` assigned to a bare name instead of list elements,
    constructor calls referenced a nonexistent ``Polynomial`` class, and the
    evaluate/derivative/integral methods all shared one colliding name —
    they are restored to their semantic names.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Create a polynomial of the given degree.

        :raises ValueError: if ``len(coefficients) != degree + 1``
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: UpperCamelCase_) -> UpperCamelCase_:
        """Coefficient-wise sum; the result has the larger degree."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return UpperCamelCase_(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return UpperCamelCase_(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: UpperCamelCase_) -> UpperCamelCase_:
        """Difference, implemented as addition with the negation."""
        return self + polynomial_a * UpperCamelCase_(0, [-1])

    def __neg__(self) -> UpperCamelCase_:
        return UpperCamelCase_(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: UpperCamelCase_) -> UpperCamelCase_:
        """Polynomial product (coefficient convolution)."""
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return UpperCamelCase_(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at ``substitution``."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Human-readable form such as ``3x^2 + 2x + 1`` (zero terms omitted)."""
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> UpperCamelCase_:
        """Return the first derivative."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return UpperCamelCase_(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> UpperCamelCase_:
        """Return the antiderivative with the given integration constant."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return UpperCamelCase_(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        """Equal iff degrees match and all coefficients are equal."""
        if not isinstance(polynomial_a, UpperCamelCase_):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
| 253 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels for each GLUE fine-tuning task; consulted by the
# conversion function below to size the classification head.
GLUE_TASKS_NUM_LABELS = {
    """cola""": 2,
    """mnli""": 3,
    """mrpc""": 2,
    """sst-2""": 2,
    """sts-b""": 1,
    """qqp""": 2,
    """qnli""": 2,
    """rte""": 2,
    """wnli""": 2,
}
# Backward-compatible alias for the previous obfuscated module-level name.
lowercase__ = GLUE_TASKS_NUM_LABELS

logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint to a PyTorch model directory.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        xlnet_config_file: JSON config describing the XLNet architecture.
        pytorch_dump_folder_path: folder where weights + config are written.
        finetuning_task: optional task name; selects the model head
            (sequence classification for GLUE tasks, QA for SQuAD,
            otherwise a plain LM head).
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())


# Backward-compatible alias for the previous obfuscated name.
_snake_case = convert_xlnet_checkpoint_to_pytorch
if __name__ == "__main__":
    # Command-line entry point: parse checkpoint/config/output paths and run
    # the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--xlnet_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained XLNet model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
    )
    parser.add_argument(
        """--finetuning_task""",
        default=None,
        type=str,
        help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True iff ``item`` occurs in the sorted list ``a_list``.

    Recursive bisection on list slices; each level copies half the list,
    which keeps the teaching implementation simple at the cost of O(n)
    extra work per level.
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


# Backward-compatible alias for the previous obfuscated name.
__magic_name__ = binary_search
if __name__ == "__main__":
    # Read a comma-separated (sorted) sequence and a target value from stdin,
    # then report whether the target is present.
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    sequence = [int(item.strip()) for item in user_input.split(''',''')]
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    not_str = '''''' if binary_search(sequence, target) else '''not '''
    print(f'{target} was {not_str}found in {sequence}')
| 244 | 0 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase_ : int = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__(ChineseCLIPImageProcessor):
    """Deprecated alias for ``ChineseCLIPImageProcessor``.

    Kept only for backward compatibility; construction emits a
    FutureWarning steering callers to the new class.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 363 |
def SCREAMING_SNAKE_CASE_(__A: list) -> list:
    """Sort ``__A`` in place with odd-even transposition (brick) sort.

    Alternating passes compare/swap pairs starting at even then odd
    indices; ``n`` passes guarantee a sorted result. Returns the same
    (now sorted) list for convenience.
    """
    arr_size = len(__A)
    for pass_num in range(arr_size):
        # even passes start at index 0, odd passes at index 1
        for i in range(pass_num % 2, arr_size - 1, 2):
            if __A[i + 1] < __A[i]:
                __A[i], __A[i + 1] = __A[i + 1], __A[i]
    return __A


# Backward-compatible conventional name, used by the demo below.
odd_even_transposition = SCREAMING_SNAKE_CASE_
if __name__ == "__main__":
    # Demo: sort a reversed range and print before/after.
    arr = list(range(10, 0, -1))
    print(F'Original: {arr}. Sorted: {SCREAMING_SNAKE_CASE_(arr)}')
| 120 | 0 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class lowerCAmelCase__(unittest.TestCase):
    """Unit tests for the Vector/Matrix linear-algebra library in ``.lib``."""

    def test_component(self) -> None:
        """Vector components are addressable by index."""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # the empty constructor must also work

    def test_str(self) -> None:
        """String form lists components in parentheses."""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), """(0,0,0,0,0,1)""")

    def test_size(self) -> None:
        """len() reports the number of components."""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """Euclidean length matches sqrt of the sum of squares."""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """Vector addition is component-wise."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """Vector subtraction is component-wise."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """Scalar multiplication scales components; vector*vector is the dot product."""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), """(3.0,6.0,9.0)""")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """zero_vector(n) has n zero components."""
        self.assertEqual(str(zero_vector(10)).count("""0"""), 10)

    def test_unit_basis_vector(self) -> None:
        """unit_basis_vector(n, i) is zero everywhere except index i."""
        self.assertEqual(str(unit_basis_vector(3, 1)), """(0,1,0)""")

    def test_axpy(self) -> None:
        """axpy(a, x, y) computes a*x + y."""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), """(3,4,7)""")

    def test_copy(self) -> None:
        """copy() yields an equal-printing vector."""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """change_component mutates a single entry in place."""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), """(0,1,0)""")

    def test_str_matrix(self) -> None:
        """Matrix string form is row-per-line with | delimiters."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""", str(a))

    def test_minor(self) -> None:
        """Minors match precomputed values at every position."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """Cofactors match precomputed values at every position."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """Determinant of the reference matrix is -5."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_matrix_mul(self) -> None:
        """Matrix*vector and matrix*scalar products."""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("""(14,32,50)""", str(a * x))
        self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """change_component mutates a single matrix entry in place."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""", str(a))

    def test_component_matrix(self) -> None:
        """component(x, y) reads a single matrix entry."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # third argument is unittest's `msg`; kept from the original test
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self) -> None:
        """Matrix addition is entry-wise."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""", str(a + b))

    def test_sub_matrix(self) -> None:
        """Matrix subtraction is entry-wise."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """square_zero_matrix(n) is all zeros."""
        self.assertEqual(
            """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""", str(square_zero_matrix(5)), )
# Run the full suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 267 |
'''simple docstring'''
from itertools import count
def a__(a__ = 50):
    """Project Euler 115: return the least row length ``n`` for which the
    number of ways to place red blocks of minimum length ``a__`` (separated
    by at least one black square) in a row of ``n`` squares first exceeds
    one million.

    ``fill_count_functions[n]`` counts the fill functions for row length n;
    rows shorter than the minimum block length admit only the empty filling.
    """
    min_block_length = a__
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)  # the all-black filling of length n
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                # block at block_start, then a gap square, then any filling
                # of the remaining suffix
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            # block flush against the right edge
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_00_00_00:
            break
    return n


# Conventional Project Euler entry-point name, used by the demo below.
solution = a__
# Print the answer for the default minimum block length (50).
# NOTE(review): this relies on a module-level `solution` callable; the
# function above is bound to a different name — confirm an alias exists.
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 267 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCamelCase_ = logging.get_logger(__name__)
class UpperCamelCase_(BaseImageProcessor):
    r"""
    Image processor performing optional shortest-edge resize, center crop,
    rescale and normalize, plus semantic-segmentation post-processing.

    Defaults: shortest edge 256 (aspect-preserving), 224x224 center crop,
    1/255 rescale, ImageNet-standard mean/std normalization.
    """

    model_input_names = ['''pixel_values''']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # module-level `resize` helper does the actual interpolation
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured transform pipeline over one or more images and
        return a ``BatchFeature`` with ``pixel_values``. Per-call arguments
        override the instance defaults set in ``__init__``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Turn model ``outputs.logits`` into per-image segmentation maps,
        optionally resized to ``target_sizes`` via bilinear interpolation.
        """
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 253 |
"""simple docstring"""
def snake_case(A__ = 10_00):
    """Return the sum of the decimal digits of ``2 ** A__``.

    Project Euler 16: for the default power of 1000 the answer is 1366.
    """
    num = 2**A__
    # sum the digits of the power, not of the exponent
    return sum(int(digit) for digit in str(num))


# Conventional Project Euler entry-point name.
solution = snake_case
if __name__ == "__main__":
    # Interactive demo: read an exponent, show 2**power and its digit sum.
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = snake_case(power)
    print('''Sum of the digits is: ''', result)
| 253 | 1 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class A_:
    """Builds tiny MaskFormerSwin configs and inputs and checks that the
    model and backbone produce outputs of the expected shapes.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=3_2,
        patch_size=2,
        num_channels=3,
        embed_dim=1_6,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=1_0,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a MaskFormerSwinConfig from the tester attributes."""
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward pass through the base model; check last hidden state shape."""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # sequence shrinks 4x per stage after the first; width doubles per stage
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Forward pass through the backbone; check feature maps, channels and error path."""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [1_3, 1_6, 1_6, 1_6])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [1_6, 3_2, 6_4])

        # verify ValueError for an unsupported stage name
        with self.parent.assertRaises(ValueError):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A_ ( __A , __A , unittest.TestCase ):
_UpperCAmelCase : Tuple = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : List[Any] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
def lowerCAmelCase ( self : str):
__lowerCamelCase : Optional[Any] = MaskFormerSwinModelTester(self)
__lowerCamelCase : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
))
def lowerCAmelCase ( self : List[str]):
pass
def lowerCAmelCase ( self : int):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : int):
return
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase)
def lowerCAmelCase ( self : str):
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCamelCase)
@unittest.skip('Swin does not use inputs_embeds')
def lowerCAmelCase ( self : List[str]):
pass
@unittest.skip('Swin does not support feedforward chunking')
def lowerCAmelCase ( self : List[Any]):
pass
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Union[str, Any] = model_class(_UpperCamelCase)
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module))
__lowerCamelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase ,nn.Linear))
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = model_class(_UpperCamelCase)
__lowerCamelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Optional[int] = [*signature.parameters.keys()]
__lowerCamelCase : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCamelCase)
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions')
def lowerCAmelCase ( self : Optional[Any]):
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone')
def lowerCAmelCase ( self : int):
pass
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
__lowerCamelCase : List[Any] = model_class(_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
with torch.no_grad():
__lowerCamelCase : Any = model(**self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase))
__lowerCamelCase : Dict = outputs.hidden_states
__lowerCamelCase : List[str] = getattr(
self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths) + 1)
self.assertEqual(len(_UpperCamelCase) ,_UpperCamelCase)
# Swin has a different seq_length
__lowerCamelCase : int = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__lowerCamelCase : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) ,[num_patches, self.model_tester.embed_dim] ,)
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCamelCase : Any = True
self.check_hidden_states_output(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase)
def lowerCAmelCase ( self : str):
__lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Optional[int] = 3
__lowerCamelCase : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__lowerCamelCase : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCamelCase : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCamelCase : List[Any] = True
self.check_hidden_states_output(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,(padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : str = True
self.check_hidden_states_output(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,(padded_height, padded_width))
    @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints')
    def lowerCAmelCase ( self : List[str]):
        # Intentionally empty: no public checkpoint exists, so the
        # from-pretrained test cannot run.
        pass
    @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin')
    def lowerCAmelCase ( self : List[Any]):
        # Intentionally empty: tracked as tech debt until the native Swin
        # implementation replaces this backbone.
        pass
    @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin')
    def lowerCAmelCase ( self : List[str]):
        # Intentionally empty: see the sibling skip above -- same pending
        # replacement by native Swin.
        pass
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE__ : Any):
__lowerCamelCase : Optional[int] = 0
return t
def check_equivalence(SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[Any]={}):
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(**_UpperCamelCase ,return_dict=_UpperCamelCase ,**_UpperCamelCase)
__lowerCamelCase : List[Any] = model(**_UpperCamelCase ,return_dict=_UpperCamelCase ,**_UpperCamelCase).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]):
if isinstance(_UpperCamelCase ,(List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(_UpperCamelCase ,_UpperCamelCase):
recursive_check(_UpperCamelCase ,_UpperCamelCase)
elif isinstance(_UpperCamelCase ,_UpperCamelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() ,dict_object.values()):
recursive_check(_UpperCamelCase ,_UpperCamelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_UpperCamelCase) ,set_nan_tensor_to_zero(_UpperCamelCase) ,atol=1E-5) ,msg=(
'Tuple and dict output are not equal. Difference:'
F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
F" {torch.isnan(_UpperCamelCase).any()} and `inf`: {torch.isinf(_UpperCamelCase)}. Dict has"
F" `nan`: {torch.isnan(_UpperCamelCase).any()} and `inf`: {torch.isinf(_UpperCamelCase)}."
) ,)
recursive_check(_UpperCamelCase ,_UpperCamelCase)
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = model_class(_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
__lowerCamelCase : Dict = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase)
__lowerCamelCase : Dict = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase)
check_equivalence(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase)
__lowerCamelCase : Optional[Any] = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ,return_labels=_UpperCamelCase)
__lowerCamelCase : Any = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ,return_labels=_UpperCamelCase)
check_equivalence(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase)
__lowerCamelCase : List[str] = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase)
__lowerCamelCase : Optional[Any] = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase)
check_equivalence(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,{'output_hidden_states': True})
__lowerCamelCase : str = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ,return_labels=_UpperCamelCase)
__lowerCamelCase : Optional[Any] = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ,return_labels=_UpperCamelCase)
check_equivalence(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,{'output_hidden_states': True})
@require_torch
class A_ ( unittest.TestCase , __A ):
    """Backbone-specific checks for MaskFormerSwinBackbone.

    Fixes over the obfuscated original: both class attributes were bound to the
    same throwaway name while the body reads ``self.all_model_classes`` /
    ``config_class``; the setup/test methods were misnamed so ``unittest``
    never invoked them; ``batch_size`` was assigned under a throwaway name.
    """

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        # unittest only calls this hook when it is literally named ``setUp``.
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)  # torch_device expected from the file's test imports -- TODO confirm
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
# (removed stray table-row artifact left over from file concatenation)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_ ( PipelineTool ):
    """Tool that summarizes an English text with a BART summarization checkpoint.

    Fixes over the obfuscated original: the base class was the undefined ``__A``
    (the import above brings in ``PipelineTool``); all nine class attributes
    were bound to one shared name; the boolean keyword arguments
    (``truncation``, ``skip_special_tokens``, ``clean_up_tokenization_spaces``)
    were erroneously bound to the input value instead of ``True``; the three
    hook methods shared one name -- they are restored to the ``encode`` /
    ``forward`` / ``decode`` names the PipelineTool contract dispatches to.
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        # Tokenize the raw text; truncate to the model's maximum length.
        return self.pre_processor(text, return_tensors='''pt''', truncation=True)

    def forward(self, inputs):
        # Greedy/default generation; the first (only) sequence is the summary.
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        # Turn generated token ids back into clean text.
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
# Number of bits per colour channel (8 bits -> integer values 0..255).
# Fix: the constant was bound to ``__A`` while every function below reads ``BITS``.
BITS = 8
__A = BITS  # retain the original (obfuscated) binding for any external reader
def decimal_to_bits(x, bits=BITS):
    """Convert an image tensor with values in [0, 1] to a +-1 bit representation.

    Fixes over the obfuscated original: both parameters shared one name (a
    SyntaxError) and every local was assigned under a throwaway name; the
    function is restored to the ``decimal_to_bits`` name the pipeline calls.
    """
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    # One power-of-two mask per bit plane, broadcast over (h, w).
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, """d -> d 1 1""")
    x = rearrange(x, """b c h w -> b c 1 h w""")
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, """b c d h w -> b (c d) h w""")
    # Map {0, 1} -> {-1, +1}.
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Convert a +-1 bit-plane tensor back to an image tensor in [0, 1].

    Fixes over the obfuscated original: duplicate parameter names (a
    SyntaxError), throwaway locals, and the mangled dtype ``torch.intaa``
    (restored to ``torch.int32``); renamed to the ``bits_to_decimal`` name the
    pipeline calls.
    """
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, """d -> d 1 1""")
    x = rearrange(x, """b (c d) h w -> b c d h w""", d=8)
    # Weighted sum over the bit-plane axis recovers the 0..255 integer value.
    dec = reduce(x * mask, """b c d h w -> b c h w""", """sum""")
    return (dec / 255).clamp(0.0, 1.0)
def _lowerCamelCase(self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0.0 , __UpperCamelCase = True , __UpperCamelCase=None , __UpperCamelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_lowerCAmelCase =timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_lowerCAmelCase =self.alphas_cumprod[timestep]
_lowerCAmelCase =self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_lowerCAmelCase =1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_lowerCAmelCase =self.bit_scale
if self.config.clip_sample:
_lowerCAmelCase =torch.clamp(__UpperCamelCase , -scale , __UpperCamelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_lowerCAmelCase =self._get_variance(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_lowerCAmelCase =(sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase =(1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase =alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_lowerCAmelCase =model_output.device if torch.is_tensor(__UpperCamelCase ) else """cpu"""
_lowerCAmelCase =torch.randn(model_output.shape , dtype=model_output.dtype , generator=__UpperCamelCase ).to(__UpperCamelCase )
_lowerCAmelCase =self._get_variance(__UpperCamelCase , __UpperCamelCase ) ** 0.5 * eta * noise
_lowerCAmelCase =prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
def _lowerCamelCase(self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="epsilon" , __UpperCamelCase=None , __UpperCamelCase = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
_lowerCAmelCase =timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_lowerCAmelCase , _lowerCAmelCase =torch.split(__UpperCamelCase , sample.shape[1] , dim=1 )
else:
_lowerCAmelCase =None
# 1. compute alphas, betas
_lowerCAmelCase =self.alphas_cumprod[t]
_lowerCAmelCase =self.alphas_cumprod[t - 1] if t > 0 else self.one
_lowerCAmelCase =1 - alpha_prod_t
_lowerCAmelCase =1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_lowerCAmelCase =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_lowerCAmelCase =model_output
else:
raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
_lowerCAmelCase =self.bit_scale
if self.config.clip_sample:
_lowerCAmelCase =torch.clamp(__UpperCamelCase , -scale , __UpperCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCAmelCase =(alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_lowerCAmelCase =self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCAmelCase =pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowerCAmelCase =0
if t > 0:
_lowerCAmelCase =torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__UpperCamelCase ).to(model_output.device )
_lowerCAmelCase =(self._get_variance(__UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
_lowerCAmelCase =pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
class lowerCamelCase__ ( DiffusionPipeline ):
    '''Bit-diffusion pipeline: diffuses images encoded as +-1 bit-plane tensors.

    Fixes over the obfuscated original: the base class was the undefined
    ``__magic_name__`` (the import above brings in ``DiffusionPipeline``);
    ``bit_scale`` was never stored on ``self``; the selected bit-aware step
    function was bound to a discarded local instead of patching the scheduler.
    '''

    def __init__(self, unet, scheduler, bit_scale=1.0):
        super().__init__()
        self.bit_scale = bit_scale
        # Patch the scheduler with the bit-aware step that clips to +-bit_scale.
        # NOTE(review): the step functions above take the scheduler as their
        # first argument; confirm the patched call site binds it as intended.
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, height=2_56, width=2_56, num_inference_steps=50, generator=None, batch_size=1, output_type="pil", return_dict=True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        # Start from Gaussian noise in pixel space, then move to bit space.
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=generator, )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample
        image = bits_to_decimal(latents)
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
# (removed stray table-row artifact left over from file concatenation)
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n) -> bool:
    """Return True if the decimal (or string) form of *n* reads the same reversed.

    Fix: the obfuscated original bound ``str(n)`` to a throwaway name and then
    compared the raw argument against a slice of itself (a TypeError for ints);
    it was also misnamed, while ``solution`` calls ``is_palindrome``.
    """
    s = str(n)
    return s == s[::-1]


def solution(n: int = 1000000) -> int:
    """Project Euler 36: sum of numbers below *n* palindromic in base 10 AND base 2."""
    total = 0
    for i in range(1, n):
        # ``bin(i)`` is '0b...'; split on 'b' to test only the binary digits.
        if is_palindrome(i) and is_palindrome(bin(i).split("""b""" )[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
# (removed stray table-row artifact left over from file concatenation)
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")

# fairseq -> transformers parameter-name mappings, grouped per sub-module.
# Fix: every constant below was bound to the same throwaway name
# ``SCREAMING_SNAKE_CASE`` (each shadowing the previous) while the loader
# functions read ``MAPPING_*`` / ``IGNORE_KEYS*`` / ``logger``.
MAPPING_SPEECH_ENCODER_PRENET = {
    """speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
    """speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
    """speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
    """speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
    """text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
    """text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
    """speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
    """speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
    """speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
    """speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
    """speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    """speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
    """speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
    """speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
    """speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
    """speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
    """speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
    """speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
    """speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
    """speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
    """speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
    """speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
    """speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
    """text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
    """text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
    """encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
    """encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
    """encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
    """encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
    """encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
    """encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
    """encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
    """encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
    """encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
    """decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
    """decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
    """decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
    """decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
    """decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
    """decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
    """decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
    """decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
    """decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
    """decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
    """decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
    """decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
    """decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
# Per-task mappings used by recursively_load_weights().
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    """encoder.version""",
    """encoder.layers.*.norm_k.weight""",
    """encoder.layers.*.norm_k.bias""",
    """decoder.version""",
    """decoder.layers.*.norm_k.weight""",
    """decoder.layers.*.norm_k.bias""",
    """decoder.pos_emb.pe_k""",
    """speech_encoder_prenet.embed_positions._float_tensor""",
    """text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    """encoder.proj""",
    """text_encoder_prenet.*""",
    """speech_decoder_prenet.*""",
    """speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    """encoder.proj""",
    """speech_encoder_prenet.*""",
    """text_decoder_prenet.*""",
    """text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    """encoder.proj""",
    """text_encoder_prenet.*""",
    """text_decoder_prenet.*""",
    """text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk ``key``'s dotted path on *hf_pointer* and copy *value* into it.

    Fixes over the obfuscated original: the function was misnamed (callers use
    ``set_recursively``) and every ``hf_pointer.<attr>.data = value`` target
    was collapsed to a discarded throwaway local.
    """
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute)
    # Shape-check against the exact tensor slot we are about to overwrite.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def should_ignore(name, ignore_keys):
    """Return True when *name* matches any pattern in *ignore_keys*.

    Patterns: a trailing ``.*`` matches by prefix; an embedded ``.*.`` matches
    when both halves occur in the name; otherwise plain substring match.
    Fix: the function was misnamed (callers use ``should_ignore``) and the
    prefix/suffix pair from ``key.split('.*.')`` was assigned to a throwaway
    name while the condition read ``prefix``/``suffix``.
    """
    for key in ignore_keys:
        if key.endswith('''.*''' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('''.*.''' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Copy every fairseq tensor into *hf_model* using the mapping for *task*.

    Fixes over the obfuscated original: the function was misnamed (the convert
    entry point calls ``recursively_load_weights``); the per-task locals
    (``feature_encoder``/``MAPPING``/``IGNORE_KEYS``) and the per-weight locals
    were assigned under throwaway names while the loop body read the real ones;
    ``hf_model.speechta`` is restored to ``speecht5`` to match the
    ``"speecht5."`` paths used by the MAPPING_* tables.
    """
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"""Unsupported task: {task}""" )
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"""{name} was ignored""" )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split('''.*.''' )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into *feature_extractor*.

    Fixes over the obfuscated original: the function was misnamed (the loader
    calls ``load_conv_layer``) and the ``.data = value`` targets were collapsed
    to discarded throwaway locals.
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    """Convert a fairseq SpeechT5 checkpoint for *task* into transformers format.

    Fixes over the obfuscated original: the function was misnamed
    (``lowercase`` shadowed all earlier helpers) and every config/tokenizer
    assignment target (``config.max_length``, ``tokenizer.mask_token``, ...)
    was collapsed to a discarded throwaway local.
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        # NOTE(review): the original obfuscation lost these assignment targets;
        # restored as max_speech_positions / max_text_positions -- confirm.
        config.max_speech_positions = 1_876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1_876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"""Unknown task name: {task}""" )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('''<mask>''' , lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
        tokenizer.add_tokens(['''<ctc_blank>'''] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint['''model'''] , model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('''Pushing to the hub...''' )
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # Command-line entry point: build the parser with the same arguments and
    # use consistent variable names (the mangled source assigned the parser to
    # one name but used another, which raised NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
# ----- snippet boundary (dataset join artifact removed) -----
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
A : List[str] = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Emits a ``FutureWarning`` on construction and otherwise behaves exactly
    like ``BeitImageProcessor``.
    """

    def __init__(self, *args, **kwargs) -> None:
        # fixed: the warning category (FutureWarning) had been mangled into a
        # reference to the kwargs dict; class/base names restored from the
        # warning text and the module import.
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# ----- snippet boundary (dataset join artifact removed) -----
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules executed during one forward pass of `module`.

    Attributes:
        module: the module to trace.
        traced: leaf modules (no submodules, or Conv2d/BatchNorm2d) in execution order.
        handles: forward-hook handles, removed after the traced forward pass.
    """

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # A module with no submodules (or a plain conv/batchnorm) is a leaf we track.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies weights from `src` to `dest` by pairing their traced operations.

    Attributes:
        src: source module (original weights).
        dest: destination module (receives the weights).
        verbose: when 1, prints every transferred pair.
        src_skip / dest_skip: module types to ignore on either side.
        raise_if_mismatch: raise when the two traces differ in length.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfers the weight of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we tracked all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}."""
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""")
class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a classy-vision RegNet so it can be fed to vissl's trunk-forward
    helper without a vissl config file."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"""Unexpected layer name {k}"""
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"""res{block_index}""", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        # out_feat_keys=None -> return the default (last) trunk output.
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return a pretrained original model function.

    Unknown keys fall back to timm: the architecture name is converted to the
    timm naming scheme and wrapped in a zero-argument factory.
    """

    def convert_name_to_timm(self, x: str) -> str:
        # "regnet-y-040" -> "regnety_040"
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return the correct hugging face RegNet class reference.

    Self-supervised SEER checkpoints (no ImageNet head) map to the bare
    ``RegNetModel``; everything else maps to ``RegNetForImageClassification``.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    """Copy the listed head parameters from one state dict into another.

    Args:
        from_state_dict: source state dict (vissl checkpoint head).
        to_state_dict: destination state dict, modified in place.
        keys: (source_key, destination_key) pairs to clone over.

    Returns:
        The (mutated) destination state dict.
    """
    # fixed: the mangled source dropped the `to_state_dict[to_key] = ...`
    # assignment target, so nothing was actually copied.
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"""Copied key={from_key} to={to_key}""")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    """Port one original RegNet checkpoint into the HF model and verify outputs.

    Args:
        name: architecture key (e.g. "regnet-y-040" or "...-seer-in1k").
        from_model_func: factory returning (original model, optional head state dict).
        our_model_func: HF model class/factory taking a config.
        config: HF RegNet configuration for this architecture.
        save_directory: local directory used for hub pushes.
        push_to_hub: whether to upload model and image processor.
    """
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        # NOTE(review): mismatches are tolerated because the vissl wrapper traces
        # extra/fewer ops than the HF model — confirm against upstream script.
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)

        our_outputs = our_model(x, output_hidden_states=True)
        our_output = (
            our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
        )

        from_output = from_model(x)
        from_output = from_output[-1] if type(from_output) is list else from_output

        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]

        assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"""Pushed {name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one (or every) RegNet checkpoint to the HF format.

    Args:
        save_directory: where downloaded weights / converted models are stored.
        model_name: a single architecture key to convert; ``None`` converts all.
        push_to_hub: forwarded to :func:`convert_weight_and_push`.

    Returns:
        (config, expected_shape) of the last converted model.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Config factory pre-loaded with the ImageNet label maps.
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"
        ),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"
        ),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"
        ),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"
        ),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"
        ),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"
        ),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"
        ),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"
        ),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"
        ),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"
        ),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"
        ),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"
        ),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8
        ),
        "regnet-y-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16
        ),
        "regnet-y-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16
        ),
        "regnet-y-016": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24
        ),
        "regnet-y-032": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24
        ),
        "regnet-y-040": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64
        ),
        "regnet-y-064": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72
        ),
        "regnet-y-080": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56
        ),
        "regnet-y-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112
        ),
        "regnet-y-160": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112
        ),
        "regnet-y-320": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer": RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328
        ),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
    }

    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        # NOTE(review): `w_0=1744` restored — the mangled source passed `w_a` twice.
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    # NOTE(review): `config` is only bound when a conversion ran — quirk kept from upstream.
    return config, expected_shape
if __name__ == "__main__":
    # Command-line entry point; variable names made consistent (the mangled
    # source assigned the parser/args to one name but used another).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
# ----- snippet boundary (dataset join artifact removed) -----
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k±1 trial division.

    Negatives, 0 and 1 are not prime.
    """
    # Name restored: the sibling spiral-ratio function calls `is_prime`, and the
    # body referenced `number` while the parameter had been mangled away.
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def _a(ratio: float = 0.1) -> int:
    """Project Euler 58: side length of the square spiral at which the fraction
    of primes along both diagonals first falls below `ratio`.

    For side length j the spiral has 2*j - 1 diagonal values; each outer ring
    contributes the three non-square corners tested with `is_prime`.
    """
    # NOTE(review): local names (j, primes, ratio) restored from the mangled
    # source, which referenced them without defining them.
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
# ----- snippet boundary (dataset join artifact removed) -----
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiply two 2x2 matrices directly (base case of Strassen's recursion).

    Raises:
        Exception: if either matrix is not 2x2.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Element-wise sum of two equally shaped matrices."""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Element-wise difference (matrix_a - matrix_b) of two equally shaped matrices."""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into its four quadrants.

    Returns:
        (top_left, top_right, bot_left, bot_right)

    Raises:
        Exception: if either dimension is odd.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return the (rows, columns) dimensions of `matrix`."""
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """Print the matrix, one row (as its repr) per line."""
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive core of Strassen's algorithm.

    Assumes both matrices are square with power-of-two dimensions (the public
    `strassen` entry point pads its inputs accordingly).
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # Strassen's seven sub-products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply `matrix1` by `matrix2` via Strassen's algorithm, zero-padding
    both matrices up to the next power-of-two square first.

    NOTE(review): padding mutates the input lists in place (rows of the
    originals are zero-extended) — pass copies if the inputs must be preserved.

    Raises:
        Exception: if the inner dimensions do not match.
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # NOTE(review): condition reconstructed from the mangled source (each side
    # compared an index-0 entry to an index-1 entry) — confirm original intent.
    if dimension1[0] == dimension2[1] and dimension2[0] == dimension1[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix (names restored — the
    # mangled source bound both operands to the same identifier).
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
# ----- snippet boundary (dataset join artifact removed) -----
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical DPR checkpoints and the locations of their configuration files.
# NOTE(review): both module-level values were mangled to the same name
# (`__lowercase`), so the logger was silently shadowed by the dict; names
# restored to the conventional `logger` / archive-map identifiers — confirm
# against how the package's __init__ imports this module.
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}
class _A(PretrainedConfig):
    """Configuration for DPR encoder/reader models (BERT-style backbone plus an
    optional projection layer of size `projection_dim`)."""

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        # NOTE(review): `self.` assignment targets and parameter names restored
        # from the mangled source (all params were collapsed to one identifier);
        # base class restored to the imported PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
# ----- snippet boundary (dataset join artifact removed) -----
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Emits a ``FutureWarning`` on construction and otherwise behaves exactly
    like ``DPTImageProcessor``.
    """

    def __init__(self, *args, **kwargs) -> None:
        # fixed: the warning category (FutureWarning) had been mangled into a
        # reference to the kwargs dict; class/base names restored from the
        # warning text and the module import. Trailing dataset artifact removed.
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    """Recursively print the structure of a (possibly nested) checkpoint dict.

    Args:
        name: key of the current entry (``None`` for the root).
        val: dict / tensor / scalar stored under ``name``.
        spaces: indentation level; grows by 2 per nesting level.
    """
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Reorder a fused QKV tensor for later Megatron-LM checkpoint layouts.

    Args:
        param: tensor whose first dim fuses heads/splits/hidden.
        checkpoint_version: Megatron checkpoint format version (1.0 or >= 2.0).
        num_splits: number of fused projections (3 for Q, K, V).
        num_heads: number of attention heads.
        hidden_size: per-head hidden size.

    Returns:
        The permuted tensor, reshaped back to the input shape. Versions other
        than 1.0 / >= 2.0 are returned unchanged apart from the final view.
    """
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def lowerCamelCase__ ( args , input_state_dict , config ):
    """Convert a Megatron-LM GPT-2 state dict to the transformers layout.

    :param args: parsed CLI namespace (unused here; kept for interface parity).
    :param input_state_dict: raw Megatron checkpoint (``torch.load`` result).
    :param config: GPT-2 config object; updated in place from the checkpoint's
        own training args when those are present.
    :return: dict keyed with transformers GPT-2 parameter names.

    NOTE(review): the obfuscated original collapsed every local into ``a``,
    losing the output keys; the keys below follow the standard transformers
    GPT-2 state-dict naming — verify against the upstream conversion script.
    """
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get('args' , None )
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if 'checkpoint_version' in input_state_dict.keys():
        checkpoint_version = input_state_dict['checkpoint_version']
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict['model']
    # The language model.
    lm = model['language_model']
    # The embeddings.
    embeddings = lm['embedding']
    # The word embeddings, truncated to vocab_size rows.
    word_embeddings = embeddings['word_embeddings']['weight']
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict['transformer.wte.weight'] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings['position_embeddings']['weight']
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0 )
    if n_positions != config.n_positions:
        raise ValueError(
            f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
    # Store the position embeddings.
    output_state_dict['transformer.wpe.weight'] = pos_embeddings
    # The transformer.
    transformer = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
    # The regex to extract layer names.
    layer_re = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        'attention.dense': '.attn.c_proj.',
        'self_attention.dense': '.attn.c_proj.',
        'mlp.dense_h_to_4h': '.mlp.c_fc.',
        'mlp.dense_4h_to_h': '.mlp.c_proj.',
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key )
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1 ) )
        # The name of the operation.
        op_name = m.group(2 )
        # Is it a weight or a bias?
        weight_or_bias = m.group(3 )
        # The name of the layer.
        layer_name = f"""transformer.h.{layer_idx}"""
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith('layernorm' ):
            ln_name = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
            output_state_dict[layer_name + '.' + ln_name + '.' + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
                1 , 1 , n_positions , n_positions )
            output_state_dict[layer_name + '.attn.bias'] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1E4 , dtype=torch.float16 )
            output_state_dict[layer_name + '.attn.masked_bias'] = masked_bias
            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0 , 1 ).contiguous()
            # Store.
            output_state_dict[layer_name + '.attn.c_attn.weight'] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Store. No change of shape.
            output_state_dict[layer_name + '.attn.c_attn.bias'] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + 'weight'] = val.transpose(0 , 1 )
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + 'bias'] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict['transformer.ln_f.weight'] = transformer['final_layernorm.weight']
    output_state_dict['transformer.ln_f.bias'] = transformer['final_layernorm.bias']
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict['lm_head.weight'] = word_embeddings
    # It should be done!
    return output_state_dict
def lowerCamelCase__ ( ):
    """CLI entry point: load a Megatron GPT-2 checkpoint, convert it, and
    save config + tokenizer + ``pytorch_model.bin`` next to the input file.

    NOTE(review): the obfuscated original collapsed every local into ``a``
    and passed ``_A`` placeholders as config values; locals and the boolean
    config arguments are restored from the surrounding reads — verify the
    GPTaConfig defaults against the upstream conversion script.
    """
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument('--print-checkpoint-structure' , action='store_true' )
    parser.add_argument(
        'path_to_checkpoint' , type=str , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
    parser.add_argument(
        '--config_file' , default='' , type=str , help='An optional config json file describing the pre-trained model.' , )
    args = parser.parse_args()
    # Extract the basename (output directory = directory of the checkpoint).
    basename = os.path.dirname(args.path_to_checkpoint )
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
    if args.path_to_checkpoint.endswith('.zip' ):
        with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
            with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict , map_location='cpu' )
    else:
        input_state_dict = torch.load(args.path_to_checkpoint , map_location='cpu' )
    ds_args = input_state_dict.get('args' , None )
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = 'gelu_fast'
            elif ds_args.openai_gelu:
                activation_function = 'gelu_new'
            else:
                activation_function = 'gelu'
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = 'gelu_new'
        # Spell out all parameters in case the defaults change.
        config = GPTaConfig(
            vocab_size=50257 ,
            n_positions=1024 ,
            n_embd=1024 ,
            n_layer=24 ,
            n_head=16 ,
            n_inner=4096 ,
            activation_function=activation_function ,
            resid_pdrop=0.1 ,
            embd_pdrop=0.1 ,
            attn_pdrop=0.1 ,
            layer_norm_epsilon=1E-5 ,
            initializer_range=0.02 ,
            summary_type='cls_index' ,
            summary_use_proj=True ,
            summary_activation=None ,
            summary_proj_to_labels=True ,
            summary_first_dropout=0.1 ,
            scale_attn_weights=True ,
            use_cache=True ,
            bos_token_id=50256 ,
            eos_token_id=50256 ,
        )
    else:
        config = GPTaConfig.from_json_file(args.config_file )
    config.architectures = ['GPT2LMHeadModel']
    # Convert.
    print('Converting' )
    output_state_dict = convert_megatron_checkpoint(args , input_state_dict , config )
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None , output_state_dict )
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = 'gpt2'
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" )
    else:
        tokenizer_model_name = 'gpt2'
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name )
    tokenizer_class = type(tokenizer ).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print('Saving config' )
    config.save_pretrained(basename )
    # Save tokenizer based on args
    print(f"""Adding {tokenizer_class} tokenizer files""" )
    tokenizer.save_pretrained(basename )
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename , 'pytorch_model.bin' )
    print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
    torch.save(output_state_dict , output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
    # The zero-argument `lowerCamelCase__` defined last above is the CLI
    # entry point; the original called an undefined name `main`.
    lowerCamelCase__()
#################################################################################################### | 96 | 1 |
def lowerCAmelCase_ ( input_a , input_b ) -> int:
    """Emulate a NAND gate: return 0 only when both inputs are 1.

    The original declared the parameter ``__a`` twice (a SyntaxError) and
    annotated the return with the undefined name ``Union``; both fixed.

    :param input_a: first gate input (0 or 1).
    :param input_b: second gate input (0 or 1).
    :return: 1 if at least one input is 0, else 0.
    """
    # NAND is true (1) whenever at least one of the inputs is 0.
    return int((input_a, input_b).count(0 ) != 0 )
def lowerCAmelCase_ ( ) -> None:
    """Check `nand_gate` against the full two-input truth table.

    The original annotated the return with ``Tuple``, which is never
    imported in this file and would raise NameError at definition time;
    the function returns nothing, so ``None`` is the correct annotation.
    (Note: this def shadows the two-argument gate of the same obfuscated
    name defined just above.)
    """
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
    # Print the NAND truth table, one result per line, in the same order
    # as the original four print statements.
    for pair in ((0, 0), (0, 1), (1, 0), (1, 1)):
        print(nand_gate(*pair))
| 10 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCAmelCase__ = get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
    """Mock download manager that serves a local/remote ``dummy_data.zip``
    fixture in place of real dataset downloads.

    The obfuscated original declared ``__init__`` with seven parameters all
    named ``snake_case__`` (a SyntaxError), assigned them to locals instead
    of the instance, and gave every method the same name. Parameter,
    attribute and method names are restored from the ``self.X`` references
    the method bodies themselves make.
    """

    # Base file name of the extracted dummy archive (read by
    # `download_dummy_data` and `download_and_extract`).
    dummy_file_name = "dummy_data"
    # Root directory holding the per-dataset scripts (read by
    # `local_path_to_dummy_data`).
    datasets_scripts_dir = "datasets"
    # NOTE(review): this flag is never read inside the visible chunk; the
    # name follows the upstream MockDownloadManager — confirm.
    is_local = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir = None,
        use_local_dummy_data = False,
        load_existing_dummy_data = True,
        download_callbacks = None,
    ):
        """Record where the dummy data lives and how it should be resolved.

        :param dataset_name: name of the dataset under test.
        :param config: optional BuilderConfig (its ``name`` nests the path).
        :param version: dataset version; stringified into the dummy path.
        :param cache_dir: cache directory forwarded to ``cached_path``.
        :param use_local_dummy_data: resolve from the local repo instead of GitHub.
        :param load_existing_dummy_data: if False, only relative paths are returned.
        :param download_callbacks: callables invoked with each requested url.
        """
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of
        # the dummy files relative to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded lazily by the `dummy_file` property
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # Resolve (and possibly download) the dummy archive exactly once.
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch and extract the dummy archive, returning its root path."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        # NOTE(review): the obfuscated source hid these two boolean arguments;
        # True/True follows the upstream MockDownloadManager — confirm.
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return the directory itself when the dummy file is a directory
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Map requested url(s) to the matching dummy-data path(s)."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        # Dummy archives are pre-extracted; extraction is the identity.
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        """Resolve a {key: url(s)} mapping into dummy-data paths."""
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        """Resolve a list of urls into a list of dummy-data paths."""
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        """Resolve a single url into a dummy-data path."""
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield (relative_posix_name, open file) pairs for archive members."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield every non-hidden file under the given path(s), sorted."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 108 | 0 |
from collections.abc import Sequence
def a__ ( arr , allow_empty_subarrays = False ):
    """Return the maximum subarray sum of `arr` (Kadane's algorithm).

    The original declared both parameters with the same name ``A__`` — a
    SyntaxError — while the body read ``arr``/``allow_empty_subarrays``.

    :param arr: sequence of numbers (may be empty).
    :param allow_empty_subarrays: if True the empty subarray (sum 0) is a
        valid candidate, so the result is never negative.
    :return: best contiguous-subarray sum; 0 for an empty input.
    """
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf' )
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart at `num`
        # (or at the empty subarray, when that is allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num )
        max_sum = max(max_sum, curr_sum )
    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # The original bound the list to an obfuscated name while the f-string
    # below referenced `nums`, which raised a NameError at runtime.
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"""{max_subarray_sum(nums) = }""")
| 162 |
from __future__ import annotations
class __lowercase :
    """Symmetric XOR cipher over single characters.

    Restored from an obfuscated original whose methods all shared one name
    and declared duplicate parameters (a SyntaxError); the method names
    follow the usage shown in the commented-out demo below the class
    (``crypt.encrypt``, ``crypt.encrypt_string``, ``crypt.encrypt_file`` …).
    """

    def __init__(self , key = 0 ):
        """Store an optional default key, used whenever a call passes key=0.

        NOTE(review): the original assigned the key to a local, leaving
        ``self.__key`` forever unset; it is stored on the instance here.
        """
        self.__key = key

    def encrypt(self , content , key ):
        """XOR-encrypt `content` (str) into a list of characters."""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_5_5
        return [chr(ord(ch ) ^ key ) for ch in content]

    def decrypt(self , content , key ):
        """XOR-decrypt a list of characters back into a list of characters."""
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_5_5
        return [chr(ord(ch ) ^ key ) for ch in content]

    def encrypt_string(self , content , key = 0 ):
        """XOR-encrypt `content` and return the result as a single string."""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 2_5_5:
            key -= 2_5_5
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def decrypt_string(self , content , key = 0 ):
        """XOR-decrypt a string; identical transform to `encrypt_string`."""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 2_5_5:
            key -= 2_5_5
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def encrypt_file(self , file , key = 0 ):
        """Encrypt `file` line by line into 'encrypt.out'; False on OSError."""
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('encrypt.out' , 'w+' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True

    def decrypt_file(self , file , key ):
        """Decrypt `file` line by line into 'decrypt.out'; False on OSError."""
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('decrypt.out' , 'w+' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 162 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_UpperCamelCase : Dict = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_UpperCamelCase : str = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_UpperCamelCase : List[str] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    """SQuAD v1 metric: exact-match and F1 via the official `evaluate` script."""

    def UpperCamelCase_ ( self ):
        """Declare the metric's input schema, citation and reference URLs.

        NOTE(review): both methods keep the obfuscated name
        ``UpperCamelCase_`` (so the second shadows this one, as in the
        original); upstream these are ``_info`` and ``_compute``.
        """
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
                    'references': {
                        'id': datasets.Value('string' ),
                        'answers': datasets.features.Sequence(
                            {
                                'text': datasets.Value('string' ),
                                'answer_start': datasets.Value('int32' ),
                            } ),
                    },
                } ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )

    def UpperCamelCase_ ( self , predictions , references ):
        """Compute {'exact_match', 'f1'} for SQuAD-format inputs.

        The original declared ``_lowerCamelCase`` twice (a SyntaxError) and
        passed undefined names to `evaluate`; both locals and the call are
        restored here.
        """
        # Map each question id to its predicted answer text.
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        # Re-shape the flat references into the nested SQuAD dataset layout
        # expected by the official scoring script.
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 220 |
from ..utils import DummyObject, requires_backends
# Import-time placeholders used when flax is not installed: any construction
# or factory call raises a helpful "install flax" error via requires_backends.
#
# Fixes restored from the imports visible on the line above this span:
#  * the metaclass was the undefined name `__snake_case`; the only metaclass
#    imported here is `DummyObject`.
#  * the backend list attribute and BOTH classmethods were all named `A__`
#    (duplicate defs shadowing each other); `DummyObject`/`requires_backends`
#    read `_backends`, and the classmethod names follow the upstream dummy
#    objects (`from_config`, `from_pretrained`) — NOTE(review): the original
#    per-pipeline class names are unrecoverable, so all thirteen classes keep
#    the obfuscated name `_snake_case` (each redefinition shadows the last).

class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )


class _snake_case ( metaclass=DummyObject ):
    _backends = ["flax"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["flax"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["flax"] )
| 345 | 0 |
"""simple docstring"""
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time: this legacy module path is kept
# only for backward compatibility; users should import the pipeline directly
# from `diffusers`.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
) | 369 | """simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class _snake_case ( a__ ):
    """EfficientNet model configuration.

    The obfuscated original declared all 22 ``__init__`` parameters with the
    single name ``UpperCAmelCase`` (a SyntaxError) and assigned every value
    to a local instead of ``self``; parameter names are restored from the
    attribute-assignment order and types/defaults visible in the source.
    """

    # NOTE(review): upstream names this attribute `model_type`; kept as-is
    # to avoid changing the visible class surface — confirm.
    snake_case__ = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        """Store all architecture hyper-parameters on the config instance."""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # NOTE(review): the source computes `sum(<list>) * 4`; upstream
        # stores this as num_hidden_layers over num_block_repeats — confirm.
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class _snake_case(a__):
    """ONNX export configuration for EfficientNet.

    Declares the dynamic axes of the single image input and the numeric
    tolerance used to validate the exported graph. The trailing
    ``| 64 | 0 |`` artifact on the last return line was a SyntaxError.
    """

    # Minimum torch version that produces a correct ONNX export.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # All four axes of pixel_values are dynamic at export time.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when comparing ONNX outputs against PyTorch.
        return 1e-5
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    """Unit tests for the DDPM noise scheduler.

    Fixes restored from the obfuscated original: the base class referenced the
    undefined name `lowerCamelCase_` (SchedulerCommonTest was imported but
    unused), `get_scheduler_config` returned an undefined `config`, every test
    method shared one name (so only the last survived pytest discovery), and
    loop bodies referenced the undefined `_UpperCAmelCase`.
    """

    # Read by SchedulerCommonTest helpers (check_over_configs, ...).
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default DDPM config dict, overridable through **kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Reference variances at the start, middle and end of the schedule.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # 51 after 50 breaks the required descending order.
        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 346 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines.

    Attributes:
        frames: list of denoised frames as NumPy arrays of shape
            `(height, width, num_channels)`, or a `torch.FloatTensor`; the list
            length is the number of video frames.

    The obfuscated original subclassed the undefined name `lowerCamelCase_`
    (while `BaseOutput` was imported unused) and gave the field the same
    mangled name as the class itself.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
    # The real pipelines need both transformers and torch; when either is
    # missing, fall through to the auto-generated dummy objects, which raise a
    # helpful error only when actually used.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 346 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
# (The obfuscated original assigned all seven constants to the single name
# `lowerCamelCase`, while the functions below read NUMBER_IMAGES, OUTPUT_DIR, ...)
OUTPUT_SIZE = (720, 1_280)  # Height, Width of each mosaic image
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100  # drop boxes smaller than this fraction of the mosaic
LABEL_DIR = ""  # directory containing YOLO-format .txt label files
IMG_DIR = ""  # directory containing the corresponding .jpg images
OUTPUT_DIR = ""  # where mosaic images and label files are written
NUMBER_IMAGES = 250  # number of mosaic images to generate
def main() -> None:
    """Generate NUMBER_IMAGES mosaic images.

    Each output stitches four randomly chosen source images into one frame and
    rewrites their YOLO annotations into the mosaic's coordinate system.
    Renamed from the duplicated `a__` so the `__main__` guard's `main()` call
    resolves again.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(f"{file_root}.jpg", new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")

        # Convert corner-format boxes back to YOLO center format for the label file.
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Load YOLO-format annotations and the matching image paths.

    Args:
        label_dir: directory containing `*.txt` label files, one line per box
            as `class x_center y_center width height` (normalized).
        img_dir: directory assumed to contain a `<label_name>.jpg` per label.

    Returns:
        (img_paths, labels) where each label entry is a list of
        `[class_id, xmin, ymin, xmax, ymax]` corner-format boxes.
        Label files with no boxes are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # center/size -> corner coordinates
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
):
    """Stitch four source images into one mosaic and remap their boxes.

    Args:
        all_img_list: image paths.
        all_annos: per-image lists of [class, xmin, ymin, xmax, ymax] boxes
            (normalized corner format).
        idxs: four indices selecting the top-left, top-right, bottom-left and
            bottom-right source images, in that order.
        output_size: (height, width) of the mosaic.
        scale_range: range the random split point is drawn from.
        filter_scale: drop remapped boxes whose width or height falls below this.

    Returns:
        (output_img, new_anno, first_path) — the mosaic, the remapped boxes and
        the path of the first (top-left) source image.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    # Random split point shared by all four quadrants.
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Return a random string of `number_char` lowercase letters and digits.

    The obfuscated original named its parameter `lowerCAmelCase__` while the
    body read the undefined `number_char`, raising a NameError on every call.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 241 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Remap a CompVis LDM VAE state dict to the diffusers AutoencoderKL layout.

    Args:
        checkpoint: the original VAE state dict.
        config: diffusers VAE config dict (from `create_vae_diffusers_config`).

    Returns:
        A new state dict whose keys match diffusers' AutoencoderKL.

    Restored from the obfuscated original, whose duplicate parameter names were
    a SyntaxError and whose `new_checkpoint[...]` assignment targets had been
    collapsed to a single throwaway name. (The function name comes from its
    call site in `vae_pt_to_vae_diffuser`.)
    """
    vae_state_dict = checkpoint

    new_checkpoint = {}

    # Top-level convolutions and norms copy over with renamed keys only.
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Encoder mid block: two resnets plus one attention.
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        # Decoder blocks are stored in reverse order relative to diffusers.
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Decoder mid block, same shape as the encoder's.
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    """Convert a Stable Diffusion v1 VAE `.pt`/`.safetensors` file to diffusers format.

    Downloads the reference v1 inference YAML, loads the checkpoint, remaps its
    state dict via `custom_convert_ldm_vae_checkpoint`, and saves an
    `AutoencoderKL` to `output_path`. (Function name taken from the
    `__main__` guard's call site.)
    """
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
lowerCamelCase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 241 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config from a checkpoint name like `mobilenet_v1_1.0_224`.

    Parses the depth multiplier and image size out of the name, loads the
    ImageNet label map (shifted by one for the extra "background" class), and
    rejects quantized checkpoints. (Function name from its call site in
    `convert_movilevit_checkpoint`.)
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = "background"
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def prepare_img():
    """Download the COCO cats sample image used to verify the converted model.

    Restored `stream=True` (the original passed an undefined name) and the
    function's real name, which `convert_movilevit_checkpoint` calls.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak a TensorFlow MobileNetV1 checkpoint into the HF structure,
    sanity-check the logits on a sample image, and save (optionally push) it.
    (Name taken from the `__main__` guard's call site.)
    """
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCAmelCase : str = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 169 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """Fixture builder for Flax RegNet tests.

    Renamed back from the shared obfuscated class name: the model test class
    below instantiates `FlaxRegNetModelTester(self)` by this name, and the
    method names are recovered from their call sites
    (`prepare_config_and_inputs`, `create_and_check_model`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # assumes one hidden-state entry per stage — matches the upstream tester
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxRegNetModelTester.__mro__[0] if False else FlaxModelTesterMixin, unittest.TestCase):
    """Config and model tests for Flax RegNet.

    The obfuscated original subclassed the undefined name `UpperCamelCase__`
    (while `FlaxModelTesterMixin` was imported unused) and shared its class
    name with two sibling classes, so only one survived test discovery.
    """

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    # RegNet is a pure-vision conv encoder: no seq2seq structure, no head
    # masking, no attentions. Attribute names follow the upstream mixin —
    # TODO confirm against FlaxModelTesterMixin.
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # No common-property checks apply to this config.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason='''RegNet does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''RegNet does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def UpperCamelCase__( )->Optional[int]:
    """Load the COCO fixture image used by the integration tests below."""
    # NOTE(review): the assignment target was mangled to ``A__`` — ``image``
    # on the next line is undefined; restore ``image = Image.open(...)``.
    A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test for the pretrained facebook/regnet-y-040 Flax model.

    NOTE(review): both methods share the mangled name ``UpperCamelCase`` (the
    second shadows the first, so ``self.default_image_processor`` no longer
    exists), and the ``A__`` targets leave ``model``, ``image_processor``,
    ``inputs``, ``outputs`` and ``expected_slice`` undefined — restore the
    original identifiers before running.
    """
    @cached_property
    def UpperCamelCase ( self ):
        return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
    @slow
    def UpperCamelCase ( self ):
        A__ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
        A__ = self.default_image_processor
        A__ = prepare_img()
        A__ = image_processor(images=__lowerCamelCase,return_tensors='''np''' )
        A__ = model(**__lowerCamelCase )
        # verify the logits
        A__ = (1, 1000)
        self.assertEqual(outputs.logits.shape,__lowerCamelCase )
        A__ = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3],__lowerCamelCase,atol=1E-4 ) )
| 193 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def SCREAMING_SNAKE_CASE_(flax_key_tuple, flax_tensor):
    """Map a Flax parameter (key tuple, tensor) pair to its PyTorch equivalent.

    The original signature repeated the parameter name ``UpperCamelCase`` (a
    SyntaxError); the body already referenced ``flax_key_tuple``/``flax_tensor``,
    so those names are restored here.

    Args:
        flax_key_tuple: tuple of path components, e.g. ``("layer", "kernel")``.
        flax_tensor: the tensor stored under that key.

    Returns:
        tuple: ``(renamed_key_tuple, converted_tensor)``.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert (MoE) layer: Flax stores (experts, in, out); torch Linear wants
        # the feature dims swapped.
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer: Flax stores (in, out); torch stores (out, in).
        # NOTE: ``".".join(...)`` is truthy for any non-empty key, preserved
        # from the original code.
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        # LayerNorm scales and embeddings only need the name change.
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def SCREAMING_SNAKE_CASE_(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened tensorstore checkpoint key into its real layer name,
    the remaining key path, and the value to store under it.

    The original signature repeated ``UpperCamelCase`` three times (a
    SyntaxError) and the mangled assignment targets left ``split_layer``,
    ``curr_real_layer_name`` and ``content`` undefined; both are fixed here
    using the names the body already referenced.

    Args:
        layer: flattened key such as ``"encoder/.../kvstore/driver"``.
        checkpoint_info: flattened checkpoint dict the key came from.
        switch_checkpoint_path: root path of the T5X checkpoint on disk.

    Returns:
        tuple: ``(curr_real_layer_name, split_layer, content)``.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]  # drop trailing "/"
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]  # drop trailing "/"
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        # The stored path is relative; anchor it at the checkpoint root.
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        # tensorstore should read from the local filesystem.
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def SCREAMING_SNAKE_CASE_(current_block, save_path):
    """Apply the shared key renaming to one shard and write it with torch.save.

    The original signature repeated ``UpperCamelCase`` twice (a SyntaxError);
    parameter names are restored from the body's own references.

    Args:
        current_block: state-dict shard mapping parameter names to tensors.
        save_path: destination file for ``torch.save``.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # NOTE(review): the original per-key transform was lost when the
        # assignment target was mangled; this rebuilds the dict unchanged —
        # TODO confirm against the upstream conversion script whether a key
        # rewrite (e.g. "mlp." -> "ff.") belongs here.
        new_current_block[k] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = WEIGHTS_NAME ) -> Optional[int]:
    """Shard a T5X Switch Transformers checkpoint into PyTorch weight files.

    NOTE(review): the parameter list repeats ``UpperCamelCase`` (a SyntaxError)
    and the local assignment targets were mangled to ``lowerCamelCase__``, so
    names such as ``max_shard_size``, ``sharded_state_dicts``, ``current_block``,
    ``checkpoint_info``, ``all_layers`` and ``weight_map`` are undefined below.
    Restore the original identifiers from the upstream conversion script before
    running; the control flow itself is kept byte-identical here.
    """
    lowerCamelCase__ : Optional[Any] = convert_file_size_to_int(UpperCamelCase )
    lowerCamelCase__ : Tuple = []
    lowerCamelCase__ : Union[str, Any] = {}
    lowerCamelCase__ : List[Any] = 0
    lowerCamelCase__ : List[str] = 0
    os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
        lowerCamelCase__ : str = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
        lowerCamelCase__ : List[str] = flatten_dict(UpperCamelCase , sep="""/""" )
    lowerCamelCase__ : Dict = {}
    for layer in checkpoint_info.keys():
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = get_key_and_tensorstore_dict(
            UpperCamelCase , UpperCamelCase , UpperCamelCase )
        if curr_real_layer_name in all_layers:
            lowerCamelCase__ : Any = content
        else:
            lowerCamelCase__ : int = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        lowerCamelCase__ : str = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        lowerCamelCase__ : str = torch.tensor(UpperCamelCase )
        lowerCamelCase__ : List[str] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        lowerCamelCase__ , lowerCamelCase__ : List[Any] = rename_base_flax_keys(tuple(key.split("""/""" ) ) , UpperCamelCase )
        lowerCamelCase__ : List[str] = """/""".join(UpperCamelCase )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            lowerCamelCase__ : Union[str, Any] = os.path.join(
                UpperCamelCase , weights_name.replace(""".bin""" , f'''-{len(UpperCamelCase )+1:05d}-of-???.bin''' ) )
            rename_and_save_block(UpperCamelCase , UpperCamelCase )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            lowerCamelCase__ : Optional[Any] = {}
            lowerCamelCase__ : Union[str, Any] = 0
        lowerCamelCase__ : Tuple = raw_weights.to(getattr(UpperCamelCase , UpperCamelCase ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    lowerCamelCase__ : str = os.path.join(UpperCamelCase , weights_name.replace(""".bin""" , f'''-{len(UpperCamelCase )+1:05d}-of-???.bin''' ) )
    rename_and_save_block(UpperCamelCase , UpperCamelCase )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(UpperCamelCase ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    lowerCamelCase__ : List[str] = {}
    lowerCamelCase__ : List[Any] = {}
    for idx, shard in enumerate(UpperCamelCase ):
        lowerCamelCase__ : Any = weights_name.replace(
            """.bin""" , f'''-{idx+1:05d}-of-{len(UpperCamelCase ):05d}.bin''' ) # len(sharded_state_dicts):05d}
        lowerCamelCase__ : List[Any] = os.path.join(UpperCamelCase , weights_name.replace(""".bin""" , f'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(UpperCamelCase , os.path.join(UpperCamelCase , UpperCamelCase ) )
        lowerCamelCase__ : Any = shard
        for key in shard:
            lowerCamelCase__ : List[str] = shard_file
    # Add the metadata
    lowerCamelCase__ : int = {"""total_size""": total_size}
    lowerCamelCase__ : Optional[int] = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
        lowerCamelCase__ : Any = json.dumps(UpperCamelCase , indent=2 , sort_keys=UpperCamelCase ) + """\n"""
        f.write(UpperCamelCase )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    # Fixes vs. the original block: the parser/args targets were mangled,
    # ``args.switch_tax_checkpoint_path`` did not match the declared
    # ``--switch_t5x_checkpoint_path`` option (AttributeError), and
    # ``shard_on_the_fly`` is not defined in this file — the sharding routine
    # is the last ``def SCREAMING_SNAKE_CASE_`` executed before this point.
    SCREAMING_SNAKE_CASE_(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def SCREAMING_SNAKE_CASE_ () -> Union[str, Any]:
    """Smoke-test a converted Switch Transformers checkpoint by generating text.

    NOTE(review): the local assignment targets were mangled to
    ``lowerCamelCase__``, so ``config``, ``model``, ``tokenizer``,
    ``UpperCamelCase`` and ``out`` are undefined below; ``TaTokenizer`` also
    looks like a mangling of ``T5Tokenizer`` — confirm against the upstream
    script before running.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
    lowerCamelCase__ : Union[str, Any] = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
    lowerCamelCase__ : List[Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    lowerCamelCase__ : str = TaTokenizer.from_pretrained("""t5-small""" )
    lowerCamelCase__ : Any = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    lowerCamelCase__ : Dict = tokenizer(UpperCamelCase , return_tensors="""pt""" ).input_ids
    lowerCamelCase__ : List[str] = model.generate(UpperCamelCase , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 129 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# NOTE(review): the assignment target was mangled — the rest of this file
# calls ``client.chat_postMessage``, so this should be ``client = WebClient(...)``.
_A : str =WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def SCREAMING_SNAKE_CASE_(UpperCamelCase):
    """Parse a pytest results summary into counts and the time spent.

    The mangled assignment targets left ``expressions``, ``failed`` and
    ``success`` undefined; this restores them.

    Args:
        UpperCamelCase: summary string such as ``"2 failed, 3 passed in 5.00s"``.

    Returns:
        tuple: ``(failed, success, time_spent)`` where the counts are ints and
        ``time_spent`` is the raw time token from the summary.
    """
    expressions = UpperCamelCase.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        # The count always precedes its label, e.g. "2 failed".
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def SCREAMING_SNAKE_CASE_(UpperCamelCase):
    """Extract the first error line for each doctest failure in a report.

    The mangled assignment targets left ``failures``, ``file`` and
    ``in_error`` undefined; this restores them.

    Args:
        UpperCamelCase: the ``failures_short`` report text, one line per row.

    Returns:
        dict: mapping of the token at index 2 of each doctest header line to
        the first non-numeric line that follows it.
    """
    failures = {}
    file = None
    in_error = False
    for line in UpperCamelCase.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            # Header of a doctest failure section.
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            # First line after the header that is not a line number is the error.
            failures[file] = line
            in_error = False
    return failures
class _lowercase :
    """Builds and posts the Slack payload summarizing doc-test results.

    NOTE(review): most local/attribute assignment targets in this class were
    mangled to ``lowerCamelCase__`` (and ``__init__``/``get_reply_blocks``
    repeat the parameter name ``UpperCamelCase__``), so references such as
    ``self._time_spent``, ``self.n_failures``, ``total_secs``, ``report``,
    ``blocks`` and ``sorted_dict`` are undefined — restore the original
    identifiers before running.
    """
    def __init__( self: Tuple , UpperCamelCase__: str , UpperCamelCase__: Dict ):
        lowerCamelCase__ : Union[str, Any] = title
        lowerCamelCase__ : Tuple = doc_test_results["""time_spent"""].split(""",""" )[0]
        lowerCamelCase__ : Union[str, Any] = doc_test_results["""success"""]
        lowerCamelCase__ : List[Any] = doc_test_results["""failures"""]
        lowerCamelCase__ : List[str] = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        lowerCamelCase__ : str = doc_test_results
    @property
    def lowerCamelCase_ ( self: Dict ):
        # Aggregate the recorded per-suite durations into "XhYmZs".
        lowerCamelCase__ : Union[str, Any] = [self._time_spent]
        lowerCamelCase__ : Tuple = 0
        for time in time_spent:
            lowerCamelCase__ : Tuple = time.split(""":""" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(UpperCamelCase__ ) == 1:
                lowerCamelCase__ : Tuple = [0, 0, time_parts[0]]
            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3_600 + minutes * 60 + seconds
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
        return F'''{int(UpperCamelCase__ )}h{int(UpperCamelCase__ )}m{int(UpperCamelCase__ )}s'''
    @property
    def lowerCamelCase_ ( self: Dict ):
        # Slack header block with the message title.
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def lowerCamelCase_ ( self: Any ):
        # Slack section shown when all tests passed.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
            },
        }
    @property
    def lowerCamelCase_ ( self: Any ):
        # Slack section shown when some tests failed.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
                    F''' {self.time}.'''
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
            },
        }
    @property
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Per-category failure report rendered as a Slack mrkdwn section.
        lowerCamelCase__ : Any = 40
        lowerCamelCase__ : List[str] = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(UpperCamelCase__ , UpperCamelCase__ )}
        lowerCamelCase__ : List[Any] = """"""
        for category, failures in category_failures.items():
            if len(UpperCamelCase__ ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(UpperCamelCase__ )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": F'''The following examples had failures:\n\n\n{report}\n''',
            },
        }
    @property
    def lowerCamelCase_ ( self: Any ):
        # Full message payload: header plus either failure or success sections.
        lowerCamelCase__ : Union[str, Any] = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(UpperCamelCase__ )
    @staticmethod
    def lowerCamelCase_ ( ):
        # Post a generic "something went wrong" message to the daily CI channel.
        lowerCamelCase__ : List[Any] = [
            {
                """type""": """section""",
                """text""": {
                    """type""": """plain_text""",
                    """text""": """There was an issue running the tests.""",
                },
                """accessory""": {
                    """type""": """button""",
                    """text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
                    """url""": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
                },
            }
        ]
        print("""Sending the following payload""" )
        print(json.dumps({"""blocks""": json.loads(UpperCamelCase__ )} ) )
        client.chat_postMessage(
            channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=UpperCamelCase__ , )
    def lowerCamelCase_ ( self: Any ):
        # Post the main summary message to the daily CI channel.
        print("""Sending the following payload""" )
        print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
        lowerCamelCase__ : Any = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
        lowerCamelCase__ : List[str] = client.chat_postMessage(
            channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=UpperCamelCase__ , )
    def lowerCamelCase_ ( self: Any , UpperCamelCase__: str , UpperCamelCase__: Dict , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] ):
        # Build the Slack blocks for one job's failure details.
        lowerCamelCase__ : Optional[Any] = """"""
        for key, value in failures.items():
            lowerCamelCase__ : int = value[:200] + """ [Truncated]""" if len(UpperCamelCase__ ) > 250 else value
            failures_text += F'''*{key}*\n_{value}_\n\n'''
        lowerCamelCase__ : Tuple = job_name
        lowerCamelCase__ : Union[str, Any] = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
        if job_link is not None:
            lowerCamelCase__ : Union[str, Any] = {
                """type""": """button""",
                """text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
                """url""": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def lowerCamelCase_ ( self: Tuple ):
        # Reply in-thread with one message per failing job.
        if self.thread_ts is None:
            raise ValueError("""Can only post reply if a post has been made.""" )
        lowerCamelCase__ : int = self.doc_test_results.pop("""job_link""" )
        self.doc_test_results.pop("""failures""" )
        self.doc_test_results.pop("""success""" )
        self.doc_test_results.pop("""time_spent""" )
        lowerCamelCase__ : List[Any] = sorted(self.doc_test_results.items() , key=lambda UpperCamelCase__ : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["""failures"""] ):
                lowerCamelCase__ : Union[str, Any] = F'''*Num failures* :{len(job_result['failed'] )} \n'''
                lowerCamelCase__ : Union[str, Any] = job_result["""failures"""]
                lowerCamelCase__ : int = self.get_reply_blocks(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , text=UpperCamelCase__ )
                print("""Sending the following reply""" )
                print(json.dumps({"""blocks""": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=F'''Results for {job}''' , blocks=UpperCamelCase__ , thread_ts=self.thread_ts["""ts"""] , )
                # Stay under Slack's rate limit between replies.
                time.sleep(1 )
def SCREAMING_SNAKE_CASE_():
    """Fetch ``{job name: job URL}`` for the current GitHub Actions run.

    Reads ``GITHUB_RUN_ID`` from the environment and pages through the GitHub
    API 100 jobs at a time. The mangled assignment targets left ``run_id``,
    ``url``, ``result`` and ``jobs`` undefined; this restores them.

    Returns:
        dict: job name -> HTML URL, or an empty dict on any failure.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The first call returned up to 100 jobs; fetch any remaining pages.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        # Best-effort: the caller treats a missing mapping as "no links".
        print("Unknown error, could not fetch links.", e)
    return {}
def SCREAMING_SNAKE_CASE_(UpperCamelCase):
    """Read every file of an artifact directory into a dict.

    The mangled assignment targets left ``_artifact`` and ``files`` undefined
    and dropped the dict-store entirely; this restores them, keying each entry
    by the file stem (the reporting code looks up names like ``"stats"``).

    Args:
        UpperCamelCase: path of the artifact directory.

    Returns:
        dict: file stem -> decoded contents; empty if the directory is missing.

    Raises:
        ValueError: if a file cannot be decoded as UTF-8.
    """
    _artifact = {}
    if os.path.exists(UpperCamelCase):
        files = os.listdir(UpperCamelCase)
        for file in files:
            try:
                with open(os.path.join(UpperCamelCase, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(UpperCamelCase, file)}.") from e
    return _artifact
def SCREAMING_SNAKE_CASE_():
    """Scan the current working directory for downloaded artifact directories.

    The original inner class and its add-path method were mangled
    (``_lowercase``/``lowerCamelCase_``) while the body referenced ``Artifact``
    and ``add_path``; both are restored here.

    Returns:
        dict: directory name -> ``Artifact`` with ``.name``, ``.paths``
        (list of ``{"name", "path"}`` dicts) and a ``str()`` of its name.
    """
    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    # Every directory in the CWD is treated as one downloaded artifact.
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    # NOTE(review): this driver references ``get_job_links``,
    # ``retrieve_available_artifacts``, ``handle_test_results``,
    # ``extract_first_line_failure``, ``retrieve_artifact``, ``Message``,
    # ``docs``, ``github_actions_job_links`` and ``message`` — all of which
    # were defined under mangled names above (``SCREAMING_SNAKE_CASE_`` /
    # ``_lowercase`` / ``_A``), so it raises NameError as-is. Restore the
    # original identifiers before running.
    _A : Any =get_job_links()
    _A : str =retrieve_available_artifacts()
    # Map file-name patterns to human-readable doc-test categories.
    _A : int =collections.OrderedDict(
        [
            ('''*.py''', '''API Examples'''),
            ('''*.md''', '''MD Examples'''),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    _A : Union[str, Any] ={
        v: {
            '''failed''': [],
            '''failures''': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    _A : Union[str, Any] =github_actions_job_links.get('''run_doctests''')
    _A : Any =available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
    _A : Dict =retrieve_artifact(artifact_path['''name'''])
    if "stats" in artifact:
        _A , _A , _A : Optional[int] =handle_test_results(artifact['''stats'''])
        _A : Union[str, Any] =failed
        _A : int =success
        _A : Optional[int] =time_spent[1:-1] + ''', '''
        _A : Any =extract_first_line_failure(artifact['''failures_short'''])
        # Attribute each FAILED line of the short summary to its category.
        for line in artifact["summary_short"].split('''\n'''):
            if re.search('''FAILED''', line):
                _A : List[Any] =line.replace('''FAILED ''', '''''')
                _A : Any =line.split()[0].replace('''\n''', '''''')
                if "::" in line:
                    _A , _A : Any =line.split('''::''')
                else:
                    _A , _A : Tuple =line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        _A : str =docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        _A : str =all_failures[test] if test in all_failures else '''N/A'''
                        _A : Tuple =failure
                        break
    _A : Union[str, Any] =Message('''🤗 Results of the doc tests.''', doc_test_results)
    message.post()
    message.post_reply()
| 129 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Unit tests for ``BlenderbotSmallTokenizer``.

    NOTE(review): the local assignment targets were mangled to
    ``__UpperCamelCase``, and inside the methods ``SCREAMING_SNAKE_CASE_``
    resolves to the mixin base class rather than the intended local value, so
    ``setUp`` and the test bodies raise at runtime — restore the original
    local names before use. ``_snake_case`` is also assigned twice; the second
    assignment clobbers the first.
    """
    _snake_case = BlenderbotSmallTokenizer
    _snake_case = False
    def A__ ( self )-> int:
        '''Create a tiny on-disk vocab/merges fixture for the tokenizer.'''
        super().setUp()
        __UpperCamelCase = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
        __UpperCamelCase = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        __UpperCamelCase = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
        __UpperCamelCase = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
        __UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
    def A__ ( self , **SCREAMING_SNAKE_CASE_ )-> Any:
        '''Build a tokenizer from the fixture directory with the special tokens.'''
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
    def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Optional[int]:
        '''Return (input_text, expected_output_text) for round-trip checks.'''
        __UpperCamelCase = '''adapt act apte'''
        __UpperCamelCase = '''adapt act apte'''
        return input_text, output_text
    def A__ ( self )-> Any:
        '''Tokenization and id conversion against the tiny fixture vocab.'''
        __UpperCamelCase = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __UpperCamelCase = '''adapt act apte'''
        __UpperCamelCase = ['''adapt''', '''act''', '''ap@@''', '''te''']
        __UpperCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        __UpperCamelCase = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        __UpperCamelCase = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
    def A__ ( self )-> Tuple:
        '''Decoding behaviour of the pretrained 90M checkpoint (network-slow).'''
        __UpperCamelCase = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        assert tok('''sam''' ).input_ids == [1384]
        __UpperCamelCase = '''I am a small frog.'''
        __UpperCamelCase = tok([src_text] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids''']
        __UpperCamelCase = tok.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )[0]
        assert src_text != decoded # I wish it did!
        assert decoded == "i am a small frog ."
    def A__ ( self )-> Optional[int]:
        '''A lone period encodes to the same id as a trailing sentence period.'''
        __UpperCamelCase = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        __UpperCamelCase = '''I am a small frog .'''
        __UpperCamelCase = '''.'''
        __UpperCamelCase = tok(SCREAMING_SNAKE_CASE_ )['''input_ids''']
        __UpperCamelCase = tok(SCREAMING_SNAKE_CASE_ )['''input_ids''']
        assert encoded[-1] == encoded_dot[0]
| 328 |
def A_ ( snake_case : list ) -> list:
    """Sort ``snake_case`` in place using binary insertion sort and return it.

    The mangled assignment targets left ``val``, ``low``, ``high`` and ``mid``
    undefined and turned the shift loop into ``range(list, list, -1)``; this
    restores the standard algorithm.

    Args:
        snake_case: mutable list of mutually comparable items.

    Returns:
        list: the same list object, sorted in ascending (stable) order.
    """
    for i in range(1, len(snake_case)):
        val = snake_case[i]
        low, high = 0, i - 1
        # Binary-search the insertion point within the sorted prefix [0, i).
        while low <= high:
            mid = (low + high) // 2
            if val < snake_case[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open the slot at `low`, then insert.
        for j in range(i, low, -1):
            snake_case[j] = snake_case[j - 1]
        snake_case[low] = val
    return snake_case
if __name__ == "__main__":
    # Fixes vs. the original block: the mangled targets left ``user_input``
    # and ``unsorted`` undefined, and the sort above is defined as ``A_``
    # (``binary_insertion_sort`` does not exist in this file).
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(A_(unsorted))
| 328 | 1 |
import doctest
from collections import deque
import numpy as np
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = [2, 1, 2, -1]
UpperCamelCase = [1, 2, 3, 4]
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = len(self.first_signal )
UpperCamelCase = len(self.second_signal )
UpperCamelCase = max(lowerCamelCase_ , lowerCamelCase_ )
# create a zero matrix of max_length x max_length
UpperCamelCase = [[0] * max_length for i in range(lowerCamelCase_ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowerCamelCase_ ):
UpperCamelCase = deque(self.second_signal )
rotated_signal.rotate(lowerCamelCase_ )
for j, item in enumerate(lowerCamelCase_ ):
matrix[i][j] += item
# multiply the matrix with the first signal
UpperCamelCase = np.matmul(np.transpose(lowerCamelCase_ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowerCamelCase_ , 2 ) for i in final_signal]
if __name__ == "__main__":
    # Run any usage examples embedded in this module's docstrings.
    doctest.testmod()
| 370 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure: submodule name -> list of public names it provides.
# Fixes vs. the original block: every assignment target was mangled to
# ``_SCREAMING_SNAKE_CASE`` while the final line referenced
# ``_import_structure`` (NameError at import time), and the optional-dependency
# branches overwrote the dict instead of adding entries to it.
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.