code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import math
def is_prime(number: int) -> bool:
    """Check primality of ``number`` in O(sqrt(n)) time.

    Uses the 6k +/- 1 optimisation: every prime greater than 3 is of the
    form 6k - 1 or 6k + 1, so after ruling out multiples of 2 and 3 only
    those candidates need trial division.

    Args:
        number: Integer to test.

    Returns:
        True if ``number`` is prime, False otherwise (including for
        negatives, 0 and 1).
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10_001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7).

    Args:
        nth: 1-based index of the prime to return; coerced with ``int()``.

    Raises:
        TypeError: If ``nth`` cannot be cast to ``int``.
        ValueError: If ``nth`` is less than one.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''') from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''')
    primes: list[int] = []
    num = 2
    # Trial-divide successive integers until we have collected `nth` primes.
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]
if __name__ == "__main__":
    # Demo entry point: prints the default result via the f-string `=` debug
    # specifier. Expects a module-level callable named `solution` to be
    # defined above.
    print(F'''{solution() = }''')
| 60 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
# NOTE(review): machine-obfuscated DeBERTa-v2 model-tester helper. Every
# method below was renamed to the same identifier `SCREAMING_SNAKE_CASE` (so
# later definitions shadow earlier ones), parameters were collapsed to the
# duplicate name `__lowerCAmelCase` (a SyntaxError as written), and every
# assignment target became the local `_lowerCamelCase`, so the constructor
# arguments are never stored on `self` and internal calls such as
# `self.get_config()` / `self.check_loss_output()` cannot resolve. Comments
# record the apparent intent only — confirm against the upstream transformers
# DeBERTa-v2 model tester before use.
class __snake_case ( _lowercase):

    def __init__( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : int=1_3 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=9_9 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Union[str, Any]=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Optional[int]=3_7 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : int=5_1_2 , __lowerCAmelCase : Tuple=1_6 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any="None" , __lowerCAmelCase : str=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Optional[Any]=None , ):
        """simple docstring"""
        # Apparent intent: record the tiny-model test configuration
        # (batch_size=13, seq_length=7, vocab=99, hidden=32, ...) on `self`.
        _lowerCamelCase : Dict = parent
        _lowerCamelCase : Union[str, Any] = batch_size
        _lowerCamelCase : Optional[Any] = seq_length
        _lowerCamelCase : Optional[Any] = is_training
        _lowerCamelCase : Dict = use_input_mask
        _lowerCamelCase : Tuple = use_token_type_ids
        _lowerCamelCase : Optional[Any] = use_labels
        _lowerCamelCase : List[str] = vocab_size
        _lowerCamelCase : Any = hidden_size
        _lowerCamelCase : int = num_hidden_layers
        _lowerCamelCase : Optional[Any] = num_attention_heads
        _lowerCamelCase : int = intermediate_size
        _lowerCamelCase : Optional[int] = hidden_act
        _lowerCamelCase : int = hidden_dropout_prob
        _lowerCamelCase : Dict = attention_probs_dropout_prob
        _lowerCamelCase : List[Any] = max_position_embeddings
        _lowerCamelCase : str = type_vocab_size
        _lowerCamelCase : List[Any] = type_sequence_label_size
        _lowerCamelCase : List[Any] = initializer_range
        _lowerCamelCase : Optional[int] = num_labels
        _lowerCamelCase : Any = num_choices
        _lowerCamelCase : int = relative_attention
        _lowerCamelCase : Union[str, Any] = position_biased_input
        _lowerCamelCase : str = pos_att_type
        _lowerCamelCase : Tuple = scope

    # Apparent intent: prepare_config_and_inputs — build random id tensors
    # plus optional mask / token-type ids / labels for the tiny model.
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """simple docstring"""
        _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowerCamelCase : List[Any] = None
        if self.use_input_mask:
            _lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        _lowerCamelCase : Any = None
        if self.use_token_type_ids:
            _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowerCamelCase : Any = None
        _lowerCamelCase : int = None
        _lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            _lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        _lowerCamelCase : int = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Apparent intent: get_config — assemble a DebertaVaConfig from the
    # stored test dimensions.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """simple docstring"""
        return DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )

    # Apparent intent: check_loss_output — a loss must be a scalar tensor.
    def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str ):
        """simple docstring"""
        self.parent.assertListEqual(list(result.loss.size() ) , [] )

    # Apparent intent: create_and_check_deberta_model — base model forward
    # with and without mask/token-type ids; checks hidden-state shape.
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ):
        """simple docstring"""
        _lowerCamelCase : List[str] = DebertaVaModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
        _lowerCamelCase : str = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
        _lowerCamelCase : List[Any] = model(__lowerCAmelCase )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )

    # Apparent intent: create_and_check_deberta_for_masked_lm.
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple ):
        """simple docstring"""
        _lowerCamelCase : Tuple = DebertaVaForMaskedLM(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Apparent intent: create_and_check_deberta_for_sequence_classification.
    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
        """simple docstring"""
        _lowerCamelCase : Optional[Any] = self.num_labels
        _lowerCamelCase : Dict = DebertaVaForSequenceClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(__lowerCAmelCase )

    # Apparent intent: create_and_check_deberta_for_token_classification.
    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ):
        """simple docstring"""
        _lowerCamelCase : Optional[int] = self.num_labels
        _lowerCamelCase : Tuple = DebertaVaForTokenClassification(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Apparent intent: create_and_check_deberta_for_question_answering.
    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
        """simple docstring"""
        _lowerCamelCase : List[str] = DebertaVaForQuestionAnswering(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Tuple = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Apparent intent: create_and_check_deberta_for_multiple_choice — inputs
    # are expanded along a num_choices axis before the forward pass.
    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
        """simple docstring"""
        _lowerCamelCase : Optional[int] = DebertaVaForMultipleChoice(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : List[Any] = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # Apparent intent: prepare_config_and_inputs_for_common — repackage the
    # tuple from prepare_config_and_inputs into (config, inputs_dict).
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """simple docstring"""
        _lowerCamelCase : Any = self.prepare_config_and_inputs()
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : Union[str, Any] = config_and_inputs
        _lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
# NOTE(review): machine-obfuscated ModelTester/PipelineTester test case. The
# class attributes were all renamed to `snake_case__` (each rebinding shadows
# the previous one, so `all_model_classes`, `pipeline_model_mapping` and the
# boolean capability flags are lost) and every test method collapsed to
# `SCREAMING_SNAKE_CASE`. Confirm names against the upstream
# `DebertaV2ModelTest` before relying on this.
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    # Apparent intent: all_model_classes — the tuple of model heads under test.
    snake_case__ : int = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Apparent intent: pipeline_model_mapping — pipeline task -> model class.
    snake_case__ : int = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Apparent intent: boolean capability flags (fx_compatible,
    # test_torchscript, etc.) — exact mapping lost in obfuscation.
    snake_case__ : List[str] = True
    snake_case__ : List[Any] = False
    snake_case__ : int = False
    snake_case__ : Optional[Any] = False
    snake_case__ : str = False

    # Apparent intent: setUp — build the model tester and config tester.
    def SCREAMING_SNAKE_CASE ( self : int ):
        """simple docstring"""
        _lowerCamelCase : List[str] = DebertaVaModelTester(self )
        _lowerCamelCase : Any = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self : int ):
        """simple docstring"""
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """simple docstring"""
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """simple docstring"""
        _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : str ):
        """simple docstring"""
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : int ):
        """simple docstring"""
        _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """simple docstring"""
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*__lowerCAmelCase )

    # Apparent intent: slow smoke test loading a pretrained checkpoint.
    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """simple docstring"""
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Tuple = DebertaVaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
# NOTE(review): obfuscated integration tests. Both test methods were renamed
# to `SCREAMING_SNAKE_CASE` (the second shadows the first) and the tensor
# placeholders `__lowerCAmelCase` are unbound — confirm against the upstream
# DeBERTa-v2 integration suite.
class __snake_case ( unittest.TestCase):

    @unittest.skip(reason='''Model not available yet''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """simple docstring"""
        pass

    # Apparent intent: run deberta-v2-xlarge on a fixed input and compare a
    # 3x3 slice of the hidden states against precomputed values.
    @slow
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """simple docstring"""
        _lowerCamelCase : Tuple = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
        _lowerCamelCase : List[str] = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        _lowerCamelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            _lowerCamelCase : Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
        # compare the actual values for a slice.
        _lowerCamelCase : Union[str, Any] = torch.tensor(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 83 | 0 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Return the surface area of a cube with edge ``side_length``.

    Raises:
        ValueError: If ``side_length`` is negative.
    """
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Return the surface area of a rectangular cuboid.

    Raises:
        ValueError: If any dimension is negative.
    """
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    """Return the surface area of a sphere: 4*pi*r**2.

    Raises:
        ValueError: If ``radius`` is negative.
    """
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    """Return the total surface area of a hemisphere (curved + flat disc): 3*pi*r**2.

    Raises:
        ValueError: If ``radius`` is negative.
    """
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    """Return the total surface area of a right circular cone.

    Uses pi*r*(r + slant), where slant = sqrt(h**2 + r**2).

    Raises:
        ValueError: If ``radius`` or ``height`` is negative.
    """
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Return the total surface area of a conical frustum.

    Raises:
        ValueError: If any argument is negative.
    """
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    # Slant height from the vertical height and the difference of the radii.
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    """Return the total surface area of a right circular cylinder: 2*pi*r*(h + r).

    Raises:
        ValueError: If ``radius`` or ``height`` is negative.
    """
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Return the surface area of a ring torus: 4 * pi**2 * R * r.

    Raises:
        ValueError: If a radius is negative, or if R < r (which would be a
            spindle or self-intersecting torus).
    """
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    """Return the area of a rectangle.

    Raises:
        ValueError: If ``length`` or ``width`` is negative.
    """
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width
def area_square(side_length: float) -> float:
    """Return the area of a square.

    Raises:
        ValueError: If ``side_length`` is negative.
    """
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    """Return the area of a triangle from its base and height.

    Raises:
        ValueError: If ``base`` or ``height`` is negative.
    """
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2
def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Return the area of a triangle from its three sides (Heron's formula).

    Raises:
        ValueError: If a side is negative, or if the three sides violate the
            triangle inequality.
    """
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area
def area_parallelogram(base: float, height: float) -> float:
    """Return the area of a parallelogram.

    Raises:
        ValueError: If ``base`` or ``height`` is negative.
    """
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height
def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Return the area of a trapezium from its two parallel bases and height.

    Raises:
        ValueError: If any argument is negative.
    """
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float) -> float:
    """Return the area of a circle: pi*r**2.

    Raises:
        ValueError: If ``radius`` is negative.
    """
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Return the area of an ellipse: pi * a * b.

    Raises:
        ValueError: If a semi-axis is negative.
    """
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Return the area of a rhombus from its two diagonals.

    Raises:
        ValueError: If a diagonal is negative.
    """
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    """Return the area of a regular polygon with ``sides`` sides of ``length``.

    Formula: (n * s**2) / (4 * tan(pi / n)).

    Raises:
        ValueError: If ``sides`` is not an int >= 3, or ``length`` is negative.
    """
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    # NOTE: the original block repeated this return statement; the duplicate
    # was unreachable dead code and has been removed.
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests
    # Demo: prints every area/surface-area helper with sample arguments. The
    # calls below expect the descriptive names (area_rectangle,
    # surface_area_cube, ...) to be defined at module level above.
    print('[DEMO] Areas of various geometric shapes: \n')
    print(F"""Rectangle: {area_rectangle(10, 20) = }""")
    print(F"""Square: {area_square(10) = }""")
    print(F"""Triangle: {area_triangle(10, 10) = }""")
    print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
    print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
    print(F"""Rhombus: {area_rhombus(10, 20) = }""")
    print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
    print(F"""Circle: {area_circle(20) = }""")
    print(F"""Ellipse: {area_ellipse(10, 20) = }""")
    print('\nSurface Areas of various geometric shapes: \n')
    print(F"""Cube: {surface_area_cube(20) = }""")
    print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
    print(F"""Sphere: {surface_area_sphere(20) = }""")
    print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
    print(F"""Cone: {surface_area_cone(10, 20) = }""")
    print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
    print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
    print(F"""Torus: {surface_area_torus(20, 10) = }""")
    print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
    print(F"""Square: {area_reg_polygon(4, 10) = }""")
    # NOTE(review): "Reqular" below is a typo in the output label ("Regular").
    print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 61 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# NOTE(review): obfuscated module constants — all three rebind the same name
# `lowerCAmelCase__`, so only the last value (the commit hash) survives. They
# appear to be, in order: a tiny test repo id, its cache directory under
# TRANSFORMERS_CACHE, and the full commit hash used by the revision tests
# below — confirm against the upstream hub-utils test module.
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert'''
lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowerCAmelCase__ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
# NOTE(review): machine-obfuscated Hub-caching tests. Every test method was
# renamed to the same identifier `SCREAMING_SNAKE_CASE` (later definitions
# shadow earlier ones) and most concrete arguments were replaced by the
# unbound placeholder `__lowerCAmelCase` — confirm against the upstream
# transformers hub-utils test suite before use.
class __snake_case ( unittest.TestCase):

    # Apparent intent: test_cached_file — download into the cache, check the
    # cache layout, re-resolution, and resolution by (partial) revision.
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """simple docstring"""
        _lowerCamelCase : Optional[Any] = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(__lowerCAmelCase ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : Optional[int] = f.read()
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
        self.assertTrue(os.path.isfile(__lowerCAmelCase ) )
        # File is cached at the same place the second time.
        _lowerCamelCase : Tuple = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        # Using a specific revision to test the full commit hash.
        _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''9b8c223''' )
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )

    # Apparent intent: test_cached_file_errors — bad repo id, bad revision,
    # and missing filename must raise with the expected messages.
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """simple docstring"""
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            _lowerCamelCase : Optional[int] = cached_file('''tiny-random-bert''' , __lowerCAmelCase )
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            _lowerCamelCase : str = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''aaaa''' )
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : int = cached_file(__lowerCAmelCase , '''conf''' )

    # Apparent intent: test_non_existence_is_cached — missing entries are
    # recorded under `.no_exist`, and connection errors can be suppressed.
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """simple docstring"""
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , '''conf''' )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : List[Any] = f.read()
        self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''.no_exist''' , __lowerCAmelCase , '''conf''' ) ) )
        _lowerCamelCase : str = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = cached_file(__lowerCAmelCase , '''conf''' , local_files_only=__lowerCAmelCase , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        _lowerCamelCase : Any = mock.Mock()
        _lowerCamelCase : Optional[Any] = 5_0_0
        _lowerCamelCase : Dict = {}
        _lowerCamelCase : List[Any] = HTTPError
        _lowerCamelCase : int = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=__lowerCAmelCase ) as mock_head:
            _lowerCamelCase : Union[str, Any] = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=__lowerCAmelCase )
            self.assertIsNone(__lowerCAmelCase )
            # This check we did call the fake head request
            mock_head.assert_called()

    # Apparent intent: test_has_file — the PyTorch-only repo has exactly one
    # of the three framework weight files.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """simple docstring"""
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )

    # Apparent intent: test_get_file_from_repo — missing files return None,
    # bad repo/revision raise, and a fetched config parses as JSON.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """simple docstring"""
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , __lowerCAmelCase )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase , revision='''ahaha''' )
        _lowerCamelCase : Dict = get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        _lowerCamelCase : Dict = json.loads(open(__lowerCAmelCase , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 7_6_8 )

    # Apparent intent: test_get_file_from_repo_local — resolve a file from a
    # local directory instead of the Hub.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCamelCase : Any = Path(__lowerCAmelCase ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(__lowerCAmelCase , '''a.txt''' ) , str(__lowerCAmelCase ) )
            self.assertIsNone(get_file_from_repo(__lowerCAmelCase , '''b.txt''' ) )
| 83 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=14 , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Any=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Optional[int]=5 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Optional[int]=16 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : Optional[Any]=None , ):
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Tuple = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : Tuple = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : Tuple = use_mc_token_ids
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : str = type_sequence_label_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : str = num_labels
SCREAMING_SNAKE_CASE : int = num_choices
SCREAMING_SNAKE_CASE : Dict = scope
SCREAMING_SNAKE_CASE : str = self.vocab_size - 1
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : str = None
if self.use_mc_token_ids:
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Dict = self.get_config()
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _A ( self : Optional[int] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _A ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , *UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : str = CTRLModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _A ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[int] = CTRLLMHeadModel(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
    """Check the classification-logits shape of CTRLForSequenceClassification.

    NOTE(review): names reconstructed (duplicate parameters are a SyntaxError
    in the original); ``config.num_labels`` was being assigned to a throwaway
    local, so the config was never updated before building the model.
    """
    config.num_labels = self.num_labels
    model = CTRLForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
    result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common, generation and pipeline tests for the CTRL model family.

    NOTE(review): the class name, mixin bases, class-attribute names and test
    method names were reconstructed from the transformers testing conventions.
    The original declared the same base class several times (a TypeError at
    class creation), duplicate parameter names (a SyntaxError) and named every
    method ``_A`` so each definition shadowed the previous one. Confirm the
    mixin names against this file's import block.
    """

    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        # NOTE(review): skipped-test name reconstructed from the skip reason — confirm.
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation with the pretrained ``ctrl`` checkpoint.

    NOTE(review): the class name, method names and unbound locals were
    reconstructed — the original gave this class the same name as the test
    class above (shadowing it at module level) and read variables that were
    never assigned.
    """

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        # Greedy decoding (do_sample=False) so the output is deterministic and
        # comparable against the hard-coded token ids above.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 62 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __snake_case ( _lowercase):
snake_case__ : List[str] = "cvt"
def __init__( self : Any , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : List[str]=[7, 3, 3] , __lowerCAmelCase : int=[4, 2, 2] , __lowerCAmelCase : int=[2, 1, 1] , __lowerCAmelCase : str=[6_4, 1_9_2, 3_8_4] , __lowerCAmelCase : Dict=[1, 3, 6] , __lowerCAmelCase : Optional[Any]=[1, 2, 1_0] , __lowerCAmelCase : Dict=[4.0, 4.0, 4.0] , __lowerCAmelCase : Dict=[0.0, 0.0, 0.0] , __lowerCAmelCase : Union[str, Any]=[0.0, 0.0, 0.0] , __lowerCAmelCase : int=[0.0, 0.0, 0.1] , __lowerCAmelCase : Union[str, Any]=[True, True, True] , __lowerCAmelCase : str=[False, False, True] , __lowerCAmelCase : List[str]=["dw_bn", "dw_bn", "dw_bn"] , __lowerCAmelCase : List[Any]=[3, 3, 3] , __lowerCAmelCase : Dict=[1, 1, 1] , __lowerCAmelCase : str=[2, 2, 2] , __lowerCAmelCase : Optional[Any]=[1, 1, 1] , __lowerCAmelCase : Optional[Any]=[1, 1, 1] , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : Any=1E-12 , **__lowerCAmelCase : int , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : int = patch_sizes
_lowerCamelCase : Optional[Any] = patch_stride
_lowerCamelCase : str = patch_padding
_lowerCamelCase : Any = embed_dim
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : Dict = depth
_lowerCamelCase : Optional[int] = mlp_ratio
_lowerCamelCase : Any = attention_drop_rate
_lowerCamelCase : Any = drop_rate
_lowerCamelCase : Dict = drop_path_rate
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : int = cls_token
_lowerCamelCase : int = qkv_projection_method
_lowerCamelCase : Optional[Any] = kernel_qkv
_lowerCamelCase : List[str] = padding_kv
_lowerCamelCase : Tuple = stride_kv
_lowerCamelCase : Union[str, Any] = padding_q
_lowerCamelCase : Optional[Any] = stride_q
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
| 83 | 0 |
from typing import TYPE_CHECKING

from ..utils import _LazyModule


# NOTE(review): the dict was renamed to `_import_structure` (the lazy-module
# call below already referenced that name), the `OnnxSeqaSeqConfigWithPast`
# import was aligned with the "OnnxSeq2SeqConfigWithPast" entry declared here,
# and the _LazyModule is now installed into sys.modules — the purpose of the
# `import sys` in the else branch.
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output of the SDE-VE scheduler's step functions.

    NOTE(review): name and base class reconstructed — ``step_pred`` below
    instantiates ``SdeVeOutput(prev_sample=..., prev_sample_mean=...)`` and
    ``BaseOutput`` is imported above but otherwise unused.
    """

    # denoised sample for the next step
    prev_sample: torch.FloatTensor
    # mean of the predictor distribution, before the diffusion noise is added
    prev_sample_mean: torch.FloatTensor
class __snake_case(SchedulerMixin, ConfigMixin):
    """Variance-exploding SDE scheduler (predictor/corrector sampling).

    NOTE(review): parameter names, method names and every ``self.*`` assignment
    were reconstructed — the original declared duplicate parameter names (a
    SyntaxError) and discarded all state (``timesteps``, ``discrete_sigmas``,
    ``sigmas``) into throwaway locals. Method names are grounded by the
    internal calls ``self.set_sigmas`` / ``self.set_timesteps`` /
    ``self.get_adjacent_sigma``; the bases come from the otherwise-unused
    ``SchedulerMixin``/``ConfigMixin`` imports.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Interchangeability hook — SDE-VE needs no input scaling."""
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        """Set the continuous timesteps, from 1 down to ``sampling_eps``."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        """Set the per-timestep noise scales used by the SDE."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        # sigma of the previous discrete index; zero at the first index.
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Predictor step: propagate the sample with the reverse SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Corrector step: Langevin-style correction after each predictor step."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Diffuse clean samples to the noise level of ``timesteps``."""
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 83 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """A binary-tree node holding a float payload.

    NOTE(review): class and field names reconstructed — the checker below uses
    ``node.data`` / ``node.left`` / ``node.right`` and annotates with
    ``TreeNode``, while the original declared three class attributes that all
    shadowed one another.
    """

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def A__(snake_case_: TreeNode | None) -> bool:
    """Return True iff the tree rooted at ``snake_case_`` is a valid binary search tree.

    An empty tree (``None``) is considered valid. Equal values are rejected
    (the BST invariant is strict).

    Raises:
        ValueError: if any node is not a ``TreeNode`` or its ``data`` is not
            castable to float.
    """

    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        # Structural check: every node must be a TreeNode with numeric data.
        if node is None:
            return True

        if not isinstance(node, TreeNode):  # original compared a node against itself — a TypeError at runtime
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(snake_case_):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        # BST invariant: every value lies strictly between the inherited bounds.
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(snake_case_, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 |
"""simple docstring"""
from torch import nn
def snake_case_(act_fn: str) -> nn.Module:
    """Return the torch activation module for *act_fn*.

    Accepts "swish"/"silu" (both map to ``nn.SiLU``), "mish" and "gelu".

    NOTE(review): the parameter was declared as ``A_`` while the body read
    ``act_fn`` (a NameError on every call); renamed to match the body.

    Raises:
        ValueError: for any other activation name.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
| 83 | 0 |
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """POST *message_body* as a Slack incoming-webhook payload to *slack_url*.

    NOTE(review): name and parameters reconstructed — the original ``def``
    declared two parameters with the same name (a SyntaxError) and the
    ``__main__`` guard below calls ``send_slack_message``.

    Raises:
        ValueError: when Slack answers with a non-200 status code.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 65 |
"""simple docstring"""
def nor_gate(input_a: int, input_b: int) -> int:
    """Return 1 when both inputs are 0 (logical NOR), else 0.

    NOTE(review): names reconstructed — the original defined two functions
    with the same name (the second shadowed the first), declared duplicate
    parameters (a SyntaxError) and compared one input against itself; the
    truth-table printer below and the ``__main__`` guard call ``nor_gate``
    and ``main`` respectively.
    """
    return int(input_a == input_b == 0)


def main() -> None:
    """Print the NOR truth table."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 83 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formats Arrow tables as (possibly nested) ``torch.Tensor`` rows/columns/batches.

    NOTE(review): method names, parameter names and the ``self.*`` assignment
    were reconstructed — the original declared ``__init__`` with duplicate
    parameter names (a SyntaxError), named every helper identically and never
    stored ``torch_tensor_kwargs`` although ``_tensorize`` reads it. The names
    are grounded by the internal call sites (``self._consolidate``,
    ``self.recursive_tensorize``, ``map_nested(self._recursive_tensorize, ...)``).
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype tensors into one tensor."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert a scalar/ndarray into a torch tensor, choosing a default dtype."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        # NOTE(review): the original dtypes were garbled ("intaa"/"floataa");
        # int64/float32 restored as the conventional torch defaults — confirm.
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # user-supplied kwargs override the inferred default dtype
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        # map_list=False: lists are handled by _recursive_tensorize itself
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 66 |
"""simple docstring"""
from __future__ import annotations
def snake_case_(A_: list[list[int]]) -> int:
    """Return the minimal path sum from the top-left to the bottom-right cell,
    moving only right or down.

    The input matrix is left unmodified (the original implementation clobbered
    the caller's matrix while accumulating the prefix minima in place).
    """
    # work on a copy so the caller's matrix is not mutated
    grid = [row[:] for row in A_]

    # preprocessing the first row: only reachable by moving right
    for j in range(1, len(grid[0])):
        grid[0][j] += grid[0][j - 1]

    # preprocessing the first column: only reachable by moving down
    for i in range(1, len(grid)):
        grid[i][0] += grid[i - 1][0]

    # each inner cell costs its own value plus the cheaper of the two predecessors
    for i in range(1, len(grid)):
        for j in range(1, len(grid[0])):
            grid[i][j] += min(grid[i - 1][j], grid[i][j - 1])

    return grid[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 83 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# NOTE(review): the import structure was rebuilt — the original bound the dict
# and every optional list to one module-level name (clobbering the structure),
# and the final _LazyModule call referenced an undefined `_import_structure`
# and was never installed into sys.modules. The submodule keys are taken from
# the corresponding `from .<module> import ...` lines in the TYPE_CHECKING
# branch below.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
class __snake_case ( Generic[T]):
def __init__( self : int , __lowerCAmelCase : T ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = data
_lowerCamelCase : Node[T] | None = None
def __str__( self : Optional[Any] ):
"""simple docstring"""
return f'''{self.data}'''
class __snake_case ( Generic[T]):
def __init__( self : int ):
"""simple docstring"""
_lowerCamelCase : Node[T] | None = None
def __iter__( self : str ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.top
while node:
yield node.data
_lowerCamelCase : Any = node.next
def __str__( self : int ):
"""simple docstring"""
return "->".join([str(__lowerCAmelCase ) for item in self] )
def __len__( self : int ):
"""simple docstring"""
return len(tuple(iter(self ) ) )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return self.top is None
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : T ):
"""simple docstring"""
_lowerCamelCase : Tuple = Node(__lowerCAmelCase )
if not self.is_empty():
_lowerCamelCase : Optional[int] = self.top
_lowerCamelCase : List[str] = node
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
if self.is_empty():
raise IndexError('''pop from empty stack''' )
assert isinstance(self.top , __lowerCAmelCase )
_lowerCamelCase : Any = self.top
_lowerCamelCase : Any = self.top.next
return pop_node.data
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
if self.is_empty():
raise IndexError('''peek from empty stack''' )
assert self.top is not None
return self.top.data
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 83 | 0 |
import argparse

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string: str) -> bool:
        """Parse an exact 'True'/'False' CLI token into a bool.

        NOTE(review): renamed from an obfuscated name — it is registered as
        ``type=parse_bool`` below, which previously raised a NameError.
        """
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    # NOTE(review): `parser`/`args`/`controlnet` were restored — the original
    # bound all three results to one reused name while reading these names.
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 68 |
"""simple docstring"""
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
# NOTE(review): the constant names below were restored from their read sites
# (`PATH_TO_TRANSFORMERS`, `spec`, `transformers`, `CONFIG_MAPPING`,
# `_re_checkpoint`, the ignore set) — the original bound every value to one
# reused module-level name.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# (made a raw string so the escapes are taken literally by `re`)
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    """Fail with ValueError if any config class docstring lacks a valid checkpoint link.

    NOTE(review): renamed from an obfuscated name — the ``__main__`` guard
    below already called ``check_config_docstrings_have_checkpoints``.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 83 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class SCREAMING_SNAKE_CASE__(_UpperCamelCase, unittest.TestCase):
    """Fast tests for the Shap-E image-to-image pipeline.

    NOTE(review): the class attributes below all shared one obfuscated name in
    the original (each shadowing the previous). They were renamed to the
    pipeline-tester contract (``pipeline_class`` / ``params`` /
    ``batch_params`` / ``required_optional_params``); the exact name of the
    final boolean flag could not be recovered from this file — confirm it
    against the test mixin before relying on it.
    """

    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def A ( self : int ):
"""simple docstring"""
return 32
@property
def A ( self : Optional[Any] ):
"""simple docstring"""
return 32
@property
def A ( self : str ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A ( self : Dict ):
"""simple docstring"""
return 8
@property
def A ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__snake_case = CLIPVisionModel(a_ )
return model
@property
def A ( self : str ):
"""simple docstring"""
__snake_case = CLIPImageProcessor(
crop_size=224 , do_center_crop=a_ , do_normalize=a_ , do_resize=a_ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
__snake_case = PriorTransformer(**a_ )
return model
@property
def A ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
__snake_case = ShapERenderer(**a_ )
return model
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.dummy_prior
__snake_case = self.dummy_image_encoder
__snake_case = self.dummy_image_processor
__snake_case = self.dummy_renderer
__snake_case = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1_024 , prediction_type="sample" , use_karras_sigmas=a_ , clip_sample=a_ , clip_sample_range=1.0 , )
__snake_case = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def A ( self : Optional[int] , a_ : Union[str, Any] , a_ : int=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(a_ ) ).to(a_ )
if str(a_ ).startswith("mps" ):
__snake_case = torch.manual_seed(a_ )
else:
__snake_case = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = "cpu"
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**a_ )
__snake_case = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = pipe(**self.get_dummy_inputs(a_ ) )
__snake_case = output.images[0]
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : List[Any] ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = torch_device == "cpu"
__snake_case = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a_ , relax_max_difference=a_ , )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**a_ )
__snake_case = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = 1
__snake_case = 2
__snake_case = self.get_dummy_inputs(a_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case = batch_size * [inputs[key]]
__snake_case = pipe(**a_ , num_images_per_prompt=a_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # NOTE(review): machine-renamed code — both methods are named `A` (the second overwrites the
    # first; the first was presumably `tearDown`), and `pipe` / `images` / `a_` are referenced
    # without ever being bound because assignment targets were renamed to `__snake_case`.
    def A ( self : List[str] ):
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def A ( self : str ):
        """End-to-end img2img run against the released checkpoint, compared to a stored output."""
        __snake_case = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
        __snake_case = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy" )
        __snake_case = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
        __snake_case = pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        __snake_case = torch.Generator(device=a_ ).manual_seed(0 )
        __snake_case = pipe(
            a_ , generator=a_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
        # 20 rendered frames of 64x64 RGB.
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(a_ , a_ )
| 69 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class __snake_case ( unittest.TestCase):
    """Empty TestCase placeholder — no fast tests are defined for this pipeline in this module."""
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase):
    # NOTE(review): machine-renamed code — both test methods below reference names (`pipe`,
    # `generator`, `image`, `new_image`, `image_slice`, `__lowerCAmelCase`) that are never bound
    # because the assignment targets were renamed to `_lowerCamelCase`. `__lowerCAmelCase` stands
    # in for several distinct originals (device, flags, tmp dir, generator, prompt) — restore
    # them individually before running.
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Save/reload round-trip: the reloaded pipeline must reproduce the same images."""
        _lowerCamelCase : int = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : str = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Dict = torch.manual_seed(0 )
        _lowerCamelCase : List[Any] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__lowerCAmelCase )
            _lowerCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCAmelCase )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        # Reset the generator to the same seed so both runs are comparable.
        _lowerCamelCase : int = generator.manual_seed(0 )
        _lowerCamelCase : List[str] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """fp16 end-to-end run, comparing a fixed image slice against recorded values."""
        _lowerCamelCase : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        _lowerCamelCase : int = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
        _lowerCamelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        _lowerCamelCase : Dict = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 83 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class ConditionalDetrConfig(PretrainedConfig):
    """
    Configuration for the Conditional DETR object-detection model.

    Stores the hyper-parameters needed to instantiate the model: backbone choice,
    encoder/decoder dimensions, Hungarian-matcher costs and loss coefficients.
    Serialization/deserialization behaviour is inherited from `PretrainedConfig`.

    Fixes over the previous (machine-renamed) version: the `__init__` signature had
    every parameter named `A_` (a duplicate-argument SyntaxError), the three methods
    were all named `a__` (so the two properties were silently destroyed), the base
    class name `UpperCamelCase` was undefined, and the class name `A` collided with
    the ONNX config class defined below.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Expose standard HF attribute names on top of the DETR-style ones.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # A timm backbone and an explicit HF backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rebuild a typed config object from a plain dict (e.g. loaded from JSON).
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher costs
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Number of attention heads (aliases the encoder's head count)."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Model hidden size (aliases `d_model`)."""
        return self.d_model

    def to_dict(self):
        """Serialize this config (and any nested backbone config) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    """
    ONNX export configuration for Conditional DETR.

    Fixes over the previous (machine-renamed) version: the class name `A` collided
    with the model config class above, all three properties were named `a__` (only
    the last survived), and the base class name `UpperCamelCase` was undefined
    (`OnnxConfig` is imported at the top of this module).
    """

    # Minimum torch version known to export this architecture correctly.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported graph's inputs."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating ONNX outputs against PyTorch."""
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset version required by the export."""
        return 12
| 70 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowerCAmelCase__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ = ort.RunOptions()
lowerCAmelCase__ = 128
lowerCAmelCase__ = 1
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = 2000
lowerCAmelCase__ = {}
for iter in range(max_iters):
lowerCAmelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCamelCase = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class _snake_case (unittest.TestCase):
    # NOTE(review): machine-renamed code — all five methods are named `UpperCamelCase__` (only the
    # last survives; originals were presumably setUp / tearDown / check_copy_consistency /
    # test_find_code_in_diffusers / test_copy_consistency), assignment targets were renamed to
    # `UpperCAmelCase_` so later reads (`self.diffusers_dir`, method arguments) are unbound, the
    # third method declares four parameters all named `_snake_case` (a SyntaxError), and
    # `black.TargetVersion.PYaa` is a corrupted attribute (presumably PY36/PY37 — confirm).
    def UpperCamelCase__ ( self ):
        # Create an isolated copy of the repo's scheduling_ddpm.py in a temp dir.
        UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir ,"schedulers/" ) )
        UpperCAmelCase_ : Tuple = self.diffusers_dir
        shutil.copy(
            os.path.join(_snake_case ,"src/diffusers/schedulers/scheduling_ddpm.py" ) ,os.path.join(self.diffusers_dir ,"schedulers/scheduling_ddpm.py" ) ,)
    def UpperCamelCase__ ( self ):
        # Remove the temp working tree created above.
        UpperCAmelCase_ : List[Any] = "src/diffusers"
        shutil.rmtree(self.diffusers_dir )
    def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case=None ):
        # Helper: write `class_code` under `comment`, black-format it, then verify
        # check_copies either reports no drift or can overwrite it to the expected result.
        UpperCAmelCase_ : Any = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            UpperCAmelCase_ : List[str] = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        UpperCAmelCase_ : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=1_19 )
        UpperCAmelCase_ : Union[str, Any] = black.format_str(_snake_case ,mode=_snake_case )
        UpperCAmelCase_ : Any = os.path.join(self.diffusers_dir ,"new_code.py" )
        with open(_snake_case ,"w" ,newline="\n" ) as f:
            f.write(_snake_case )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(_snake_case ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name ,overwrite=_snake_case )
            with open(_snake_case ,"r" ) as f:
                # NOTE(review): assertTrue's second argument is a message, not a comparison —
                # this only checks the file is non-empty, presumably assertEqual was intended.
                self.assertTrue(f.read() ,_snake_case )
    def UpperCamelCase__ ( self ):
        # The reference code embedded at module top must match what lives in the repo.
        UpperCAmelCase_ : Dict = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
        self.assertEqual(_snake_case ,_snake_case )
    def UpperCamelCase__ ( self ):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" ,"DDPMSchedulerOutput" ,REFERENCE_CODE + "\n" ,)
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" ,"DDPMSchedulerOutput" ,_snake_case ,)
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" ,"TestSchedulerOutput" ,re.sub("DDPM" ,"Test" ,_snake_case ) ,)
        # Copy consistency with a really long name
        UpperCAmelCase_ : str = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' ,f'''{long_class_name}SchedulerOutput''' ,re.sub("Bert" ,_snake_case ,_snake_case ) ,)
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" ,"TestSchedulerOutput" ,_snake_case ,overwrite_result=re.sub("DDPM" ,"Test" ,_snake_case ) ,)
| 71 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative divergence step of c = x + y*i under the Mandelbrot iteration.

    Iterates z -> z**2 + c starting from z = c, and returns step / (max_step - 1)
    where ``step`` is the iteration index at which |z|**2 first exceeded 4, or
    ``max_step - 1`` (i.e. 1.0) if the point never diverged — points inside the set.

    Fix over the previous (machine-renamed) version: the signature declared all three
    parameters as ``A_`` (a duplicate-argument SyntaxError) while the body and the
    caller below already used ``x``, ``y``, ``max_step`` / ``get_distance``.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # Divergence is guaranteed for any complex number with absolute value > 2,
        # i.e. |z|**2 > 4.
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Map a divergence distance to a monochrome pixel.

    Points inside the Mandelbrot set (distance == 1) are black; all others white.
    Fix over the previous (machine-renamed) version: the parameter was named ``A_``
    while the body read ``distance`` and the caller below used this function's
    restored name ``get_black_and_white_rgb``.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Map a divergence distance to an RGB color via the HSV hue wheel.

    Points inside the Mandelbrot set (distance == 1) are black; others get a fully
    saturated hue proportional to their (relative) divergence step.
    Fix over the previous (machine-renamed) version: the parameter was named ``A_``
    while the body read ``distance`` and the caller below used the restored name.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        # hsv_to_rgb returns floats in [0, 1]; scale to 8-bit channel values.
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set into a new PIL RGB image and return it.

    Each pixel is mapped to figure coordinates centred on
    (figure_center_x, figure_center_y) spanning ``figure_width`` horizontally (the
    vertical span preserves the image's aspect ratio), then colored via
    ``get_color_coded_rgb`` or ``get_black_and_white_rgb``.

    Fixes over the previous (machine-renamed) version: the signature declared every
    parameter as ``A_`` (a duplicate-argument SyntaxError) while the body already
    read the descriptive names, and pixel writes were lost as bare assignments. The
    loop-invariant ``figure_height`` is also hoisted out of the per-pixel loop.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # Vertical extent that keeps the figure's aspect ratio equal to the image's.
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCAmelCase__ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 83 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
    # NOTE(review): machine-renamed code — this is presumably the Swin v2 model tester used by the
    # test class below (which instantiates `SwinvaModelTester`). `__init__` declares ~30 parameters
    # all named `snake_case_` (a duplicate-argument SyntaxError), the `self.<attr> =` targets were
    # collapsed into the local name `lowercase` (so no instance state is ever stored), and all six
    # helper methods are named `_A` (only the last survives). The intended attribute for each
    # assignment can be read off its right-hand side.
    def __init__( self , snake_case_ , snake_case_=13 , snake_case_=32 , snake_case_=2 , snake_case_=3 , snake_case_=16 , snake_case_=[1, 2, 1] , snake_case_=[2, 2, 4] , snake_case_=2 , snake_case_=2.0 , snake_case_=True , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_="gelu" , snake_case_=False , snake_case_=True , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=10 , snake_case_=8 , ):
        lowercase =parent
        lowercase =batch_size
        lowercase =image_size
        lowercase =patch_size
        lowercase =num_channels
        lowercase =embed_dim
        lowercase =depths
        lowercase =num_heads
        lowercase =window_size
        lowercase =mlp_ratio
        lowercase =qkv_bias
        lowercase =hidden_dropout_prob
        lowercase =attention_probs_dropout_prob
        lowercase =drop_path_rate
        lowercase =hidden_act
        lowercase =use_absolute_embeddings
        lowercase =patch_norm
        lowercase =layer_norm_eps
        lowercase =initializer_range
        lowercase =is_training
        lowercase =scope
        lowercase =use_labels
        lowercase =type_sequence_label_size
        lowercase =encoder_stride
    def _A( self ):
        """Build random pixel values (and labels when enabled) plus a config."""
        lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase =None
        if self.use_labels:
            lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowercase =self.get_config()
        return config, pixel_values, labels
    def _A( self ):
        """Build a SwinvaConfig from the tester's hyper-parameters."""
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def _A( self , snake_case_ , snake_case_ , snake_case_ ):
        """Forward the base model and check the last hidden state's shape."""
        lowercase =SwinvaModel(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        lowercase =model(snake_case_ )
        # Sequence shrinks 4x per stage after the first; width doubles per stage.
        lowercase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowercase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def _A( self , snake_case_ , snake_case_ , snake_case_ ):
        """Check masked-image-modeling output shapes, including greyscale input."""
        lowercase =SwinvaForMaskedImageModeling(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        lowercase =model(snake_case_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        lowercase =1
        lowercase =SwinvaForMaskedImageModeling(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowercase =model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def _A( self , snake_case_ , snake_case_ , snake_case_ ):
        """Check image-classification logits shape."""
        lowercase =self.type_sequence_label_size
        lowercase =SwinvaForImageClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        lowercase =model(snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def _A( self ):
        """Split prepared inputs into (config, inputs_dict) for the common test mixin."""
        lowercase =self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase =config_and_inputs
        lowercase ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    # NOTE(review): machine-renamed code — the two mixin bases are both `__SCREAMING_SNAKE_CASE`
    # (undefined here; presumably ModelTesterMixin and PipelineTesterMixin, imported above), the
    # class attributes all share the name `UpperCamelCase__` (only the last assignment survives;
    # originals were presumably all_model_classes / pipeline_model_mapping / fa flags), every
    # method is named `_A` (so unittest would discover none of the intended tests), assignment
    # targets were collapsed into the local `lowercase`, and `snake_case_` stands in for many
    # distinct original names (SwinvaConfig, config, model_class, torch_device, ...).
    UpperCamelCase__ = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    UpperCamelCase__ = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    def _A( self ):
        """Set up the model tester and the config tester."""
        lowercase =SwinvaModelTester(self )
        lowercase =ConfigTester(self , config_class=snake_case_ , embed_dim=37 )
    def _A( self ):
        """Run the standard config round-trip checks."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _A( self ):
        lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )
    @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
    def _A( self ):
        pass
    @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
    def _A( self ):
        pass
    def _A( self ):
        """The input embedding must be a Module; no output embedding (or a Linear head)."""
        lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase =model_class(snake_case_ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowercase =model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
    def _A( self ):
        """forward() must take `pixel_values` as its first argument."""
        lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase =model_class(snake_case_ )
            lowercase =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase =[*signature.parameters.keys()]
            lowercase =['''pixel_values''']
            self.assertListEqual(arg_names[:1] , snake_case_ )
    def _A( self ):
        """Attention outputs: one tensor per stage, shaped (heads, win^2, win^2)."""
        lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        lowercase =True
        for model_class in self.all_model_classes:
            lowercase =True
            lowercase =False
            lowercase =True
            lowercase =model_class(snake_case_ )
            model.to(snake_case_ )
            model.eval()
            with torch.no_grad():
                lowercase =model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
            lowercase =outputs.attentions
            lowercase =len(self.model_tester.depths )
            self.assertEqual(len(snake_case_ ) , snake_case_ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowercase =True
            lowercase =config.window_size**2
            lowercase =model_class(snake_case_ )
            model.to(snake_case_ )
            model.eval()
            with torch.no_grad():
                lowercase =model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
            lowercase =outputs.attentions
            self.assertEqual(len(snake_case_ ) , snake_case_ )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            lowercase =len(snake_case_ )
            # Check attention is always last and order is fine
            lowercase =True
            lowercase =True
            lowercase =model_class(snake_case_ )
            model.to(snake_case_ )
            model.eval()
            with torch.no_grad():
                lowercase =model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
            if hasattr(self.model_tester , '''num_hidden_states_types''' ):
                lowercase =self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                lowercase =2
            self.assertEqual(out_len + added_hidden_states , len(snake_case_ ) )
            lowercase =outputs.attentions
            self.assertEqual(len(snake_case_ ) , snake_case_ )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        """Helper: verify hidden_states and reshaped_hidden_states shapes for one model class."""
        lowercase =model_class(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        with torch.no_grad():
            lowercase =model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
        lowercase =outputs.hidden_states
        lowercase =getattr(
            self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(snake_case_ ) , snake_case_ )
        # Swinv2 has a different seq_length
        lowercase =(
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowercase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        lowercase =outputs.reshaped_hidden_states
        self.assertEqual(len(snake_case_ ) , snake_case_ )
        lowercase , lowercase , lowercase , lowercase =reshaped_hidden_states[0].shape
        lowercase =(
            reshaped_hidden_states[0].view(snake_case_ , snake_case_ , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def _A( self ):
        """hidden_states output via kwarg and via config flag."""
        lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        lowercase =(
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            lowercase =True
            self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase =True
            self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
    def _A( self ):
        """Same as above but with an image size that is not a multiple of the patch size."""
        lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        lowercase =3
        lowercase =(
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowercase =(
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # Pad each dimension up to the next multiple of the patch size.
        lowercase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowercase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            lowercase =True
            self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase =True
            self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , (padded_height, padded_width) )
    def _A( self ):
        lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*snake_case_ )
    def _A( self ):
        lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case_ )
    @slow
    def _A( self ):
        """Smoke-test loading the first released checkpoint."""
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase =SwinvaModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
    def _A( self ):
        """With zeroed initializer ranges, all trainable params must init to 0 or 1."""
        lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        lowercase =_config_zero_init(snake_case_ )
        for model_class in self.all_model_classes:
            lowercase =model_class(config=snake_case_ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase ):
    # NOTE(review): machine-renamed integration test — both methods are named `_A` (the
    # cached_property was presumably `default_image_processor`, the slow test
    # `test_inference_image_classification_head`), assignment targets were collapsed into the
    # local `lowercase`, and `snake_case_` stands in for several originals (torch_device, inputs,
    # expected_shape, expected_slice). This class name also collides with the two classes above.
    @cached_property
    def _A( self ):
        """Image processor for the released tiny Swin v2 checkpoint (None without vision deps)."""
        return (
            AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
            if is_vision_available()
            else None
        )
    @slow
    def _A( self ):
        """End-to-end classification on the COCO cats fixture, checked against recorded logits."""
        lowercase =SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
            snake_case_ )
        lowercase =self.default_image_processor
        lowercase =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowercase =image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
        # forward pass
        with torch.no_grad():
            lowercase =model(**snake_case_ )
        # verify the logits
        lowercase =torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , snake_case_ )
        lowercase =torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(snake_case_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
| 72 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def snake_case_ ( tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt" ):
    """Tokenize one text line, padding/truncating to *max_length*.

    NOTE(review): the original signature reused the name ``A_`` for every
    parameter (a SyntaxError) and tested ``isinstance(A_, A_)``; parameter
    names restored from how the body uses them, with the BART special case
    restored against the BartTokenizer imported above.

    Returns the tokenizer's encoding of ``[line]``.
    """
    # BART tokenizers need add_prefix_space when the line lacks a leading space.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def snake_case_ ( input_ids, pad_token_id, attention_mask=None, ):
    """Drop columns that are padding in every row of a token-id batch.

    NOTE(review): the original reused ``A_`` for all parameters (SyntaxError);
    names restored from the body (``input_ids.ne(pad_token_id)``).

    Returns the trimmed ids, or a (ids, mask) pair when a mask is given.
    """
    # A column is kept if any row holds a non-pad token there.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case ( Dataset):
    """Line-by-line seq2seq dataset over ``<type_path>.source`` / ``.target`` files.

    NOTE(review): reconstructed from a corrupted definition — ``__init__``
    reused one parameter name (SyntaxError), attribute assignments were
    collapsed into throwaway locals while the code reads ``self.src_file`` etc.,
    both helper methods shared the name SCREAMING_SNAKE_CASE, and the base
    class was the undefined name ``_lowercase`` (torch ``Dataset``, imported
    above, is the intended base).  The collate method name ``collate_fn`` is
    conventional — verify against callers.
    """

    def __init__( self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            # Optionally restrict to the first n_obs examples.
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__( self ):
        return len(self.src_lens)

    def __getitem__( self, index ):
        """Return tokenized source/target tensors for one example."""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5 (TaTokenizer is the imported name)
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # RAG wraps two tokenizers; pick the right sub-tokenizer for each side.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens( data_file ):
        """Character length of every line in *data_file* (cheap length proxy)."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn( self, batch ):
        """Stack a list of example dicts and trim shared padding columns."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
# Module-level logger.  Bound to both names because the helper below
# (set_extra_model_params) calls ``logger.info`` while the original assignment
# only created ``lowerCAmelCase__``, leaving ``logger`` undefined.
lowerCAmelCase__ = logger = getLogger(__name__)
def snake_case_ ( A_ : List[List] ):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [item for sublist in A_ for item in sublist]
def snake_case_ ( A_ : str ):
    """Dump git repository metadata into ``<A_>/git_log.json``.

    NOTE(review): the original discarded the result of ``get_git_info()`` and
    saved the folder path itself as the JSON payload; the collected repo info
    is what gets written now.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(A_, "git_log.json"))
def snake_case_ ( content, path, indent=4, **json_dump_kwargs ):
    """Serialize *content* as JSON to *path*.

    NOTE(review): the original reused ``A_`` for every parameter (SyntaxError);
    names restored from the body (content is dumped, path is opened).
    """
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def snake_case_ ( A_ : Any ):
    """Return the parsed contents of the JSON file at *A_*."""
    with open(A_) as stream:
        data = json.load(stream)
    return data
def snake_case_ ( ):
    """Return metadata (id, sha, branch, hostname) about the enclosing git repo.

    NOTE(review): the original body referenced the undefined name ``A_``;
    restored to searching parent directories for the repo and using
    ``str(repo)`` as the repo id.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def snake_case_ ( fn: Callable, x: Iterable ):
    """Eager map: ``list(map(fn, x))``.

    NOTE(review): duplicate parameter names (SyntaxError) replaced with
    distinct ones.
    """
    return list(map(fn, x))
def snake_case_ ( obj, path ):
    """Pickle *obj* to the file at *path*.

    NOTE(review): duplicate parameter names (SyntaxError) replaced with
    distinct ones; the object is dumped, the path is opened.
    """
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def snake_case_ ( A_ : List[str] ):
    """SQuAD-style normalization: lowercase, drop punctuation and articles,
    collapse whitespace.

    NOTE(review): the inner helpers took a parameter named ``A_`` but read the
    undefined name ``text``; parameter names restored so each helper uses its
    own argument.
    """

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(A_))))
def snake_case_ ( prediction, ground_truth ):
    """Token-level F1 between a prediction and a reference (SQuAD metric).

    NOTE(review): duplicate parameter names (SyntaxError) replaced with
    distinct ones.  Returns 0 when the token bags do not overlap.
    """
    pred_tokens = normalize_answer(prediction).split()
    gold_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection counts shared tokens with multiplicity.
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens)
    recall = 1.0 * num_same / len(gold_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def snake_case_ ( prediction, ground_truth ):
    """True when the normalized prediction equals the normalized reference.

    NOTE(review): duplicate parameter names (SyntaxError) replaced with
    distinct ones.
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def snake_case_ ( output_lns, reference_lns ):
    """Mean exact-match score over aligned hypothesis/reference lines.

    NOTE(review): duplicate parameter names (SyntaxError) replaced with
    distinct ones.  Returns ``{"em": score}``; 0 for empty input.
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def snake_case_ ( A_ : str ):
    """True when the model prefix denotes a RAG model (starts with 'rag').

    NOTE(review): the body referenced the undefined name ``model_prefix``;
    it now uses the actual parameter.
    """
    return A_.startswith("rag")
def snake_case_ ( extra_params, hparams, config ):
    """Move attributes named in *extra_params* from *hparams* onto *config*.

    NOTE(review): duplicate parameter names (SyntaxError) replaced with
    distinct ones, and the lost assignment of the T5 alias restored
    (``equivalent_param["dropout"] = "dropout_rate"`` — the original bound the
    string to a throwaway local).

    Returns the (hparams, config) pair after the transfer.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 83 | 0 |
import itertools
import math
def lowerCamelCase__ (_UpperCAmelCase):
    """Primality test via trial division over 6k±1 candidates."""
    if 1 < _UpperCAmelCase < 4:
        # 2 and 3 are prime
        return True
    if _UpperCAmelCase < 2 or _UpperCAmelCase % 2 == 0 or _UpperCAmelCase % 3 == 0:
        # Negatives, 0, 1, even numbers and multiples of 3 are never prime.
        return False
    # Every remaining prime has the form 6k - 1 or 6k + 1.
    candidate = 5
    limit = int(math.sqrt(_UpperCAmelCase) + 1)
    while candidate < limit:
        if _UpperCAmelCase % candidate == 0 or _UpperCAmelCase % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def lowerCamelCase__ ():
    """Yield prime numbers in increasing order, starting at 2.

    NOTE(review): the original bound the counter to a throwaway name and then
    read the undefined names ``_UpperCAmelCase`` and ``num``; a single counter
    is used now.
    """
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def lowerCamelCase__ (_UpperCAmelCase = 1_0001):
    """Return the n-th prime number (Project Euler #7; default n = 10001)."""
    primes = prime_generator()
    # Discard the first n-1 primes, then return the next one.
    for _ in range(_UpperCAmelCase - 1):
        next(primes)
    return next(primes)
# Script entry point: print the default (10001st) prime using the `=` f-string
# debug format.  NOTE(review): `solution` is not bound under that name in this
# file (the function above is named `lowerCamelCase__`) — verify.
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 73 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowerCAmelCase__ = logging.get_logger(__name__)
# Map from model shortcut names to hosted config.json URLs on the Hub.
# NOTE(review): this assignment reuses the name `lowerCAmelCase__`, shadowing
# the logger created on the previous line — verify the intended distinct names.
lowerCAmelCase__ = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class __snake_case ( PretrainedConfig):
    """CamemBERT model configuration.

    NOTE(review): reconstructed from a corrupted definition — ``__init__``
    reused one parameter name for all arguments (SyntaxError) while the body
    read the real names (vocab_size, hidden_size, ...), and the attribute
    assignments were collapsed into throwaway locals.  The base class was the
    undefined name ``_lowercase``; ``PretrainedConfig`` (imported above and
    required by ``super().__init__``) is the intended base.
    """

    # Model-type tag consumed by the config framework (original obfuscated name kept).
    snake_case__ : Optional[Any] = "camembert"

    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        """Store all architecture hyper-parameters; token ids go to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( OnnxConfig):
    """ONNX export configuration (dynamic input axes) for CamemBERT.

    NOTE(review): the base class was the undefined name ``_lowercase``
    (``OnnxConfig`` is imported above), and the axes dict was assigned to a
    throwaway local while the return statement read ``dynamic_axis``; unified
    on ``dynamic_axis``.  The property's obfuscated name is kept — upstream
    this is the ``inputs`` property; verify against callers.
    """

    @property
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Input spec: which axes of input_ids / attention_mask are dynamic."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 83 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# NOTE(review): the original bound the logger to ``lowercase_`` and then
# immediately shadowed it with the URL map below, while the DETA config class
# logs through the name ``logger``.  Bind both names so ``logger.info`` works.
lowercase_ = logger = logging.get_logger(__name__)
# Map from model shortcut names to hosted config.json URLs on the Hub.
lowercase_ = {
    """ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class __UpperCamelCase ( PretrainedConfig ):
    """Configuration for the DETA detection model.

    NOTE(review): reconstructed from a corrupted definition — ``__init__``
    reused one parameter name for all 37 arguments (SyntaxError), both class
    attributes were assigned to the same name (the second shadowed the first),
    attribute assignments were collapsed into throwaway locals, and the base
    class was the undefined name ``lowerCAmelCase__`` (``PretrainedConfig`` is
    imported above and ``self.__class__.model_type`` is read in ``to_dict``).
    """

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs, ):
        """Store transformer / deformable-attention / matcher / loss hyper-parameters."""
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                # Re-hydrate a plain dict into the proper backbone config class.
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads( self ):
        """Alias declared in attribute_map."""
        return self.encoder_attention_heads

    @property
    def hidden_size( self ):
        """Alias declared in attribute_map."""
        return self.d_model

    def to_dict( self ):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 74 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# NOTE(review): the original assigned all four constants to the single name
# ``lowerCAmelCase__`` (each shadowing the previous), while the ConvertCommand
# class below reads HIGHLIGHT_MESSAGE_PRE / HIGHLIGHT_MESSAGE_POST /
# TO_HIGHLIGHT / TO_CONVERT; the real names are restored.  The replacement
# templates also carried spurious backslashes from raw triple-quoted
# escaping (R'''\''''), which would corrupt re.sub output; plain quotes now.

# Markers wrapped around lines that likely need manual attention after conversion.
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

# Expressions whose presence flags a line for manual review.
TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
def snake_case_ ( A_ : Namespace ):
    """argparse factory: build a ConvertCommand from the parsed Namespace.

    NOTE(review): the body referenced the undefined name ``args``; it now uses
    the actual parameter.
    """
    return ConvertCommand(A_.tfds_path, A_.datasets_directory)
class __snake_case ( BaseDatasetsCLICommand):
    """``datasets-cli convert``: port a TensorFlow Datasets script to HF Datasets.

    NOTE(review): reconstructed from a corrupted definition — ``__init__``
    reused one parameter name (SyntaxError), both methods were named
    SCREAMING_SNAKE_CASE (the second shadowed the first), attribute
    assignments were collapsed into throwaway locals while the code reads
    ``self._tfds_path`` etc., and the base class was the undefined name
    ``_lowercase`` (``BaseDatasetsCLICommand``, imported above, supplies the
    register_subcommand/run contract).
    """

    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        """Attach the ``convert`` sub-command and its arguments to *parser*."""
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.")
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.")
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder.")
        # snake_case_ is the module-level factory above that builds this command
        # from parsed args (the original mistakenly passed the parser itself).
        train_parser.set_defaults(func=snake_case_)

    def __init__( self, tfds_path: str, datasets_directory: str, *args ):
        """Remember the source tfds path and the destination datasets folder."""
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run( self ):
        """Convert every eligible ``.py`` file under --tfds_path into --datasets_directory."""
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 83 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCamelCase_ :
    """Fixture builder for the TF ESM model tests.

    NOTE(review): reconstructed from a corrupted definition — every method was
    named ``lowercase_`` (later defs shadowed earlier ones) while the test
    class below calls ``prepare_config_and_inputs`` etc. by their real names,
    and several methods reused one parameter name (SyntaxError); both restored.
    """

    def __init__( self, parent, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs( self ):
        """Build a small EsmConfig plus random ids, mask and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder( self ):
        """Same as above, plus encoder states for cross-attention tests."""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """Run the base model with dict, list and bare-tensor call conventions."""
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """Run the model as a decoder with cross-attention over encoder states."""
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """Check the MLM head produces vocab-sized logits."""
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """Check the token-classification head produces num_labels-sized logits."""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common( self ):
        """Return (config, inputs_dict) in the shape the common mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase_ ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Model-level test suite for TF ESM.

    NOTE(review): reconstructed from a corrupted definition — the four class
    attributes were all assigned to one name (each shadowing the previous),
    every method was named ``lowercase_`` (so unittest discovered none of
    them), and the mixin bases were the undefined duplicated name ``__a``
    (the mixins are imported above); conventional names restored.
    """

    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp( self ):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings( self ):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings( self ):
        pass

    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
assert name is None
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
    # Slow integration tests pinning exact numerical outputs of the small
    # pretrained ESM-2 checkpoint (facebook/esm2_t6_8M_UR50D).
    # NOTE(review): obfuscated locals — `model`, `output`, `expected_slice`
    # are read below but every assignment targets `UpperCAmelCase__`.

    @slow
    def lowercase_ ( self : int ):
        """Masked-LM logits for a trivial 6-token input must match recorded values."""
        UpperCAmelCase__ : Union[str, Any] = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        UpperCAmelCase__ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
        UpperCAmelCase__ : Optional[Any] = model(_A )[0]
        # (batch, seq_len, vocab) — ESM-2 has a 33-symbol vocabulary.
        UpperCAmelCase__ : List[Any] = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , _A )
        # compare the actual values for a slice.
        UpperCAmelCase__ : Dict = tf.constant(
            [
                [
                    [8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
                    [-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
                    [-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )

    @slow
    def lowercase_ ( self : Union[str, Any] ):
        """Base-model hidden states for a short sequence must match recorded values."""
        UpperCAmelCase__ : Tuple = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        UpperCAmelCase__ : List[Any] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        UpperCAmelCase__ : Any = model(_A )[0]
        # compare the actual values for a slice.
        UpperCAmelCase__ : Optional[int] = tf.constant(
            [
                [
                    [0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
                    [0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
                    [0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 75 |
"""simple docstring"""
def snake_case_ ( weights: list, values: list, number_of_items: int, max_weight: int, index: int ) -> int:
    """Solve the 0/1 knapsack problem by naive recursion.

    Args:
        weights: weight of each item.
        values: value of each item (parallel to ``weights``).
        number_of_items: how many items are available (``len(weights)``).
        max_weight: remaining knapsack capacity.
        index: index of the item currently being considered.

    Returns:
        The maximum total value achievable with items ``index..number_of_items-1``
        within the remaining capacity.

    Note: the original obfuscated signature named every parameter ``A_`` (a
    SyntaxError) and recursed on an undefined name ``knapsack``; both fixed here
    while keeping the public name and parameter order.
    """
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans_without = snake_case_(weights, values, number_of_items, max_weight, index + 1 )
    ans_with = 0
    # Option 2: take the current item, if it still fits.
    if weights[index] <= max_weight:
        ans_with = values[index] + snake_case_(
            weights, values, number_of_items, max_weight - weights[index], index + 1 )
    return max(ans_without, ans_with )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 83 | 0 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# T5X parameter-path fragment -> HF SwitchTransformers parameter-path fragment.
# NOTE(review): this dict is read below as `MOE_LAYER_NAME_MAPPING` but is
# bound to the obfuscated name `a_` — the module cannot run as written.
a_ = {
    '/attention/': '/0/SelfAttention/',
    '/self_attention/': '/0/SelfAttention/',
    '/encoder_decoder_attention/': '/1/EncDecAttention/',
    'value': 'v',
    'query': 'q',
    'key': 'k',
    'out': 'o',
    'pre_self_attention_layer_norm': '0/layer_norm',
    'pre_cross_attention_layer_norm': '1/layer_norm',
    'pre_attention_layer_norm': '0/layer_norm',  # previously 1, but seems wrong
    'token_embedder': 'shared',
    'encoder_norm': 'final_layer_norm',
    'decoder_norm': 'final_layer_norm',
    'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
    'router/router_weights/w/': 'router/classifier/',
    'roer/roer_weights/w/': 'router/classifier/',
    'logits_dense': 'lm_head',
}
def __UpperCAmelCase ( __UpperCamelCase ):
    """Rename T5X checkpoint keys in ``s_dict`` to HF SwitchTransformers names.

    NOTE(review): heavily obfuscated — the parameter is presumably ``s_dict``,
    every local is assigned to ``__lowercase`` while later lines read the
    intended names (``keys``, ``new_key``, ``groups``, ``expert_weihts``), and
    ``MOE_LAYER_NAME_MAPPING`` is bound above as ``a_``; this function raises
    NameError if executed as written. The f-string on the expert print line
    nests double quotes inside a double-quoted f-string, which is also a
    SyntaxError before Python 3.12.
    """
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    __lowercase : List[Any] = list(s_dict.keys() )
    for key in keys:
        __lowercase : Optional[int] = R'''.*/layers_(\d+)'''
        __lowercase : Tuple = key
        if re.match(__UpperCamelCase , __UpperCamelCase ):
            __lowercase : Optional[Any] = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , __UpperCamelCase )
        __lowercase : str = R'''(encoder|decoder)\/'''
        if re.match(__UpperCamelCase , __UpperCamelCase ):
            __lowercase : Union[str, Any] = re.match(__UpperCamelCase , __UpperCamelCase ).groups()
            # Encoder blocks put the MLP at sub-layer 1, decoder blocks at 2
            # (the decoder has an extra cross-attention sub-layer).
            if groups[0] == "encoder":
                __lowercase : Dict = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , __UpperCamelCase )
                __lowercase : Union[str, Any] = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , __UpperCamelCase )
            elif groups[0] == "decoder":
                __lowercase : Optional[Any] = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , __UpperCamelCase )
                __lowercase : Dict = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , __UpperCamelCase )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                __lowercase : Optional[Any] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
        print(f"""{key} -> {new_key}""" )
        __lowercase : Union[str, Any] = s_dict.pop(__UpperCamelCase )
    # The relative-attention bias is stored transposed relative to HF layout.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        __lowercase : Optional[Any] = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        __lowercase : List[str] = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    # 3. Take extra care of the EXPERTS layer
    # Each fused expert tensor is split into one entry per expert.
    for key in list(s_dict.keys() ):
        if "expert" in key:
            __lowercase : List[str] = s_dict[key].shape[0]
            __lowercase : str = s_dict[key]
            for idx in range(__UpperCamelCase ):
                __lowercase : str = expert_weihts[idx]
                print(f"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
            s_dict.pop(__UpperCamelCase )
    return s_dict
# gin-file hyperparameter name -> SwitchTransformersConfig field name.
# NOTE(review): read below as `GIN_TO_CONFIG_MAPPING` but bound to the
# obfuscated name `a_`.
a_ = {
    'NUM_ENCODER_LAYERS': 'num_layers',
    'NUM_DECODER_LAYERS': 'num_decoder_layers',
    'NUM_HEADS': 'num_heads',
    'HEAD_DIM': 'd_kv',
    'EMBED_DIM': 'd_model',
    'MLP_DIM': 'd_ff',
    'NUM_SELECTED_EXPERTS': 'num_selected_experts',
    'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
    'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
    'dense.MlpBlock.activations': 'feed_forward_proj',
}
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
    """Convert a Google gin config file into a ``SwitchTransformersConfig``.

    NOTE(review): both parameters share one obfuscated name — a SyntaxError;
    judging by the body they were ``(gin_file, num_experts)``. Locals are
    likewise collapsed onto ``__lowercase``, so the reads of ``regex_match``,
    ``activation``, ``args`` and ``config`` below fail, and
    ``GIN_TO_CONFIG_MAPPING`` is bound above as ``a_``.
    """
    # Convert a google style config to the hugging face fromat
    import regex as re
    with open(__UpperCamelCase , '''r''' ) as f:
        __lowercase : Dict = f.read()
    # Grab every "NAME = value" line and keep the ones we know how to map.
    __lowercase : Tuple = re.findall(R'''(.*) = ([0-9.]*)''' , __UpperCamelCase )
    __lowercase : Union[str, Any] = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            __lowercase : Tuple = float(__UpperCamelCase ) if '''.''' in value else int(__UpperCamelCase )
    # The activation list is written as a quoted tuple, e.g. ('gelu',).
    __lowercase : Any = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , __UpperCamelCase )[0]
    __lowercase : Optional[int] = str(activation[1] )
    __lowercase : int = num_experts
    __lowercase : Optional[int] = SwitchTransformersConfig(**__UpperCamelCase )
    return config
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="./" , __UpperCamelCase=8 ):
    """Load a T5X/Flax Switch Transformers checkpoint and save it as PyTorch.

    NOTE(review): all five parameters share one obfuscated name — a
    SyntaxError; judging by the body and the argparse wiring below they were
    ``(flax_checkpoint_path, config_name, gin_file, pytorch_dump_path,
    num_experts)``. Locals are collapsed onto ``__lowercase`` and the helpers
    ``convert_gin_to_config`` / ``rename_keys`` are themselves bound to
    obfuscated names above, so this cannot run as written.
    """
    # Initialise PyTorch model
    print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
    __lowercase : Optional[int] = checkpoints.load_tax_checkpoint(__UpperCamelCase )
    # Config comes from the gin file if given, otherwise from a pretrained name.
    if gin_file is not None:
        __lowercase : Union[str, Any] = convert_gin_to_config(__UpperCamelCase , __UpperCamelCase )
    else:
        __lowercase : List[Any] = SwitchTransformersConfig.from_pretrained(__UpperCamelCase )
    __lowercase : Optional[Any] = SwitchTransformersForConditionalGeneration(__UpperCamelCase )
    __lowercase : int = flax_params['''target''']
    # Flatten -> rename keys -> unflatten, then load into the PT model.
    __lowercase : List[Any] = flatten_dict(__UpperCamelCase , sep='''/''' )
    __lowercase : int = rename_keys(__UpperCamelCase )
    __lowercase : Any = unflatten_dict(__UpperCamelCase , sep='''/''' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(__UpperCamelCase , __UpperCamelCase )
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    pt_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
    # Command-line entry point for the T5X -> PyTorch conversion script.
    # NOTE(review): the parser and parsed args are bound to the obfuscated
    # name `a_` while the calls below read `parser` / `args` (NameError as
    # written), and `args.switch_tax_checkpoint_path` does not match the
    # declared option `--switch_t5x_checkpoint_path`.
    a_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
            ' model architecture. If not provided, a `gin_file` has to be provided.'
        ),
    )
    parser.add_argument(
        '--gin_file',
        default=None,
        type=str,
        required=False,
        help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
    )
    parser.add_argument(
        '--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
    )
    parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
    a_ = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_tax_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 76 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_lowercase):
    """Dummy placeholder object that raises an informative error (via
    ``requires_backends``) whenever the optional ``transformers``, ``torch``
    or ``note_seq`` backends are not installed.

    Fix: the original methods declared ``*args`` and ``**kwargs`` with the
    SAME obfuscated name (a SyntaxError); they are renamed so the module can
    at least be imported. Method names and behavior are unchanged.
    """

    # Backends this dummy stands in for.
    snake_case__ : Optional[Any] = ["transformers", "torch", "note_seq"]

    def __init__( self , *args , **kwargs ):
        """Accept any arguments and raise unless all backends are available."""
        requires_backends(self , ['transformers', 'torch', 'note_seq'] )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , *args , **kwargs ):
        """Classmethod stand-in for a factory constructor; same backend check."""
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , *args , **kwargs ):
        # NOTE(review): redefines the classmethod above (the second binding
        # wins); kept to match the original obfuscated shape.
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 83 | 0 |
"""simple docstring"""
A = """Input must be a string of 8 numbers plus letter"""
A = """TRWAGMYFPDXBNJZSQVHLCKE"""
def _UpperCamelCase ( UpperCamelCase ) -> bool:
"""simple docstring"""
if not isinstance(UpperCamelCase , UpperCamelCase ):
__UpperCAmelCase : List[Any] = f"Expected string as input, found {type(UpperCamelCase ).__name__}"
raise TypeError(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = spanish_id.replace("-" , "" ).upper()
if len(UpperCamelCase ) != 9:
raise ValueError(UpperCamelCase )
try:
__UpperCAmelCase : int = int(spanish_id_clean[0:8] )
__UpperCAmelCase : Optional[Any] = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase):
    # Slow integration test for the Flax mT5 model.
    # NOTE(review): obfuscated locals — `model`, `tokenizer`, `input_ids`,
    # `labels`, `decoder_input_ids`, `logits`, `loss`, `mtf_score` and
    # `EXPECTED_SCORE` are read below but every assignment targets
    # `_lowerCamelCase`; this body raises NameError if executed as written.

    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Check google/mt5-small reproduces a known per-sequence score for a
        fixed input/target pair (within 1e-4)."""
        _lowerCamelCase : Any = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        _lowerCamelCase : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        _lowerCamelCase : Union[str, Any] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        _lowerCamelCase : Optional[int] = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        # Teacher forcing: decoder inputs are the labels shifted right.
        _lowerCamelCase : List[Any] = shift_tokens_right(__lowerCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
        _lowerCamelCase : int = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
        _lowerCamelCase : Optional[Any] = optax.softmax_cross_entropy(__lowerCAmelCase , onehot(__lowerCAmelCase , logits.shape[-1] ) ).mean()
        _lowerCamelCase : Dict = -(labels.shape[-1] * loss.item())
        _lowerCamelCase : Dict = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 83 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    # Extract a subset of layers from a full BertForMaskedLM checkpoint to
    # seed a smaller student model for transfer-learned distillation.
    # NOTE(review): obfuscated — every assignment targets the single name
    # `SCREAMING_SNAKE_CASE_`, while the reads use the intended names
    # (`parser`, `args`, `model`, `prefix`, `state_dict`, `compressed_sd`,
    # `std_idx`); the script raises NameError if executed as written.
    SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser(
        description=(
            'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
            ' Distillation'
        )
    )
    parser.add_argument('--model_type', default='bert', choices=['bert'])
    parser.add_argument('--model_name', default='bert-base-uncased', type=str)
    parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
    parser.add_argument('--vocab_transform', action='store_true')
    SCREAMING_SNAKE_CASE_: Dict =parser.parse_args()
    if args.model_type == "bert":
        SCREAMING_SNAKE_CASE_: Optional[Any] =BertForMaskedLM.from_pretrained(args.model_name)
        SCREAMING_SNAKE_CASE_: str ='bert'
    else:
        raise ValueError('args.model_type should be "bert".')
    SCREAMING_SNAKE_CASE_: Any =model.state_dict()
    SCREAMING_SNAKE_CASE_: int ={}
    # Copy the token/position embeddings and the embedding LayerNorm verbatim.
    for w in ["word_embeddings", "position_embeddings"]:
        SCREAMING_SNAKE_CASE_: Union[str, Any] =state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        SCREAMING_SNAKE_CASE_: List[str] =state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
    SCREAMING_SNAKE_CASE_: str =0
    # Keep six of the twelve teacher layers (0, 2, 4, 7, 9, 11), renumbering
    # them consecutively in the student via `std_idx`.
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            SCREAMING_SNAKE_CASE_: Optional[int] =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            SCREAMING_SNAKE_CASE_: Dict =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            SCREAMING_SNAKE_CASE_: Union[str, Any] =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            SCREAMING_SNAKE_CASE_: str =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            SCREAMING_SNAKE_CASE_: List[str] =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            SCREAMING_SNAKE_CASE_: Any =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            SCREAMING_SNAKE_CASE_: Tuple =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            SCREAMING_SNAKE_CASE_: Optional[Any] =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1
    # Copy the MLM head (and optionally the vocab transform layers).
    SCREAMING_SNAKE_CASE_: Any =state_dict['cls.predictions.decoder.weight']
    SCREAMING_SNAKE_CASE_: Union[str, Any] =state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            SCREAMING_SNAKE_CASE_: str =state_dict[f"cls.predictions.transform.dense.{w}"]
            SCREAMING_SNAKE_CASE_: Optional[int] =state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
| 78 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    '''files''', [
        ['''full:README.md''', '''dataset_infos.json'''],
        ['''empty:README.md''', '''dataset_infos.json'''],
        ['''dataset_infos.json'''],
        ['''full:README.md'''],
    ], )
def snake_case_ ( files, tmp_path_factory ):
    """Dataset infos load from README.md YAML front matter, from the legacy
    dataset_infos.json, or from both.

    Fix: the original declared both parameters as ``A_`` (a SyntaxError) and
    assigned every local to ``_lowerCamelCase`` while reading the intended
    names; reconstructed with the names the body actually reads.
    """
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
            f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
            f.write('''''' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''', '''w''' ) as f:
            f.write('''{"default": {"dataset_size": 42}}''' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    '''dataset_info''', [
        DatasetInfo(),
        DatasetInfo(
            description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ),
    ], )
def snake_case_ ( tmp_path, dataset_info ):
    """A DatasetInfo round-trips through write_to_directory / from_directory.

    Fix: the original declared both parameters as ``A_`` (a SyntaxError) and
    bound locals to ``_lowerCamelCase``; reconstructed with readable names.
    """
    dataset_info_dir = str(tmp_path )
    dataset_info.write_to_directory(dataset_info_dir )
    reloaded = DatasetInfo.from_directory(dataset_info_dir )
    assert dataset_info == reloaded
    # Serialization must produce the canonical dataset_info.json file.
    assert os.path.exists(os.path.join(dataset_info_dir, '''dataset_info.json''' ) )
def snake_case_ ( ):
    """A fully-populated DatasetInfo round-trips through its YAML dict form.

    Fix: the original bound every local to ``_lowerCamelCase`` while reading
    the intended names; reconstructed with the names the body actually reads.
    """
    dataset_info = DatasetInfo(
        description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    # The YAML form contains exactly the whitelisted fields, with
    # YAML-serializable values only.
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def snake_case_ ( ):
    """An empty DatasetInfo serializes to an empty YAML dict.

    Fix: the original bound locals to ``_lowerCamelCase`` while reading
    ``dataset_info`` / ``dataset_info_yaml_dict``; names reconstructed.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    '''dataset_infos_dict''', [
        DatasetInfosDict(),
        DatasetInfosDict({'''default''': DatasetInfo()} ),
        DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
        DatasetInfosDict(
            {
                '''default''': DatasetInfo(
                    description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, )
            } ),
        DatasetInfosDict(
            {
                '''v1''': DatasetInfo(dataset_size=42 ),
                '''v2''': DatasetInfo(dataset_size=13_37 ),
            } ),
    ], )
def snake_case_ ( tmp_path, dataset_infos_dict ):
    """A DatasetInfosDict round-trips through write_to_directory/from_directory,
    up to the information the YAML representation can carry.

    Fix: the original declared both parameters as ``A_`` (a SyntaxError) and
    bound locals to ``_lowerCamelCase``; reconstructed with readable names.
    """
    dataset_infos_dir = str(tmp_path )
    dataset_infos_dict.write_to_directory(dataset_infos_dir )
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, '''README.md''' ) )
| 83 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def _lowerCamelCase ( class_info_file , repo_path="shi-labs/oneformer_demo" ) -> dict:
    """Build the OneFormer metadata dict from a class-info JSON file hosted on
    the Hub.

    Args:
        class_info_file: filename of the class-info JSON inside the dataset repo.
        repo_path: Hub dataset repo to download from.

    Returns:
        Mapping of class id -> class name, plus ``"thing_ids"`` (ids with
        ``isthing`` set) and ``"class_names"``.

    Fix: the original declared both parameters with the same obfuscated name
    (a SyntaxError), bound every local to one name, and annotated the return
    as ``str`` although a dict is returned; all repaired, public name kept.
    """
    with open(hf_hub_download(repo_path , class_info_file , repo_type="""dataset""" ) , """r""" ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["""name"""]
        class_names.append(info["""name"""] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class UpperCAmelCase_ ( unittest.TestCase ):
    # Helper "model tester" that builds OneFormer image-processor configs,
    # computes expected resize dimensions, and fabricates fake model outputs.
    # NOTE(review): the __init__ and the third method below declare multiple
    # parameters with the same obfuscated name `_lowerCAmelCase` — a
    # SyntaxError as written — and all three helper methods share the name
    # `__UpperCAmelCase`, so only the last binding would survive.
    def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=10 , _lowerCAmelCase=False , _lowerCAmelCase=255 , _lowerCAmelCase="shi-labs/oneformer_demo" , _lowerCAmelCase="ade20k_panoptic.json" , _lowerCAmelCase=10 , ):
        # Store basic image/processor settings for the tests.
        UpperCAmelCase__ : Any = parent
        UpperCAmelCase__ : Tuple = batch_size
        UpperCAmelCase__ : List[Any] = num_channels
        UpperCAmelCase__ : str = min_resolution
        UpperCAmelCase__ : Dict = max_resolution
        UpperCAmelCase__ : List[str] = do_resize
        UpperCAmelCase__ : Union[str, Any] = {"""shortest_edge""": 32, """longest_edge""": 1333} if size is None else size
        UpperCAmelCase__ : str = do_normalize
        UpperCAmelCase__ : Optional[Any] = image_mean
        UpperCAmelCase__ : Any = image_std
        UpperCAmelCase__ : str = class_info_file
        UpperCAmelCase__ : Tuple = prepare_metadata(_lowerCAmelCase , _lowerCAmelCase )
        UpperCAmelCase__ : Dict = num_text
        UpperCAmelCase__ : Dict = repo_path
        # for the post_process_functions
        UpperCAmelCase__ : Union[str, Any] = 2
        UpperCAmelCase__ : int = 10
        UpperCAmelCase__ : int = 10
        UpperCAmelCase__ : int = 3
        UpperCAmelCase__ : int = 4
        UpperCAmelCase__ : Any = num_labels
        UpperCAmelCase__ : List[Any] = do_reduce_labels
        UpperCAmelCase__ : int = ignore_index

    def __UpperCAmelCase ( self ):
        # Kwarg dict used to construct a OneFormerImageProcessor under test.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=False ):
        # Compute the (height, width) the processor's shortest-edge resize
        # should produce; in batched mode, the per-image maxima.
        if not batched:
            UpperCAmelCase__ : Any = image_inputs[0]
            if isinstance(_lowerCAmelCase , Image.Image ):
                UpperCAmelCase__ , UpperCAmelCase__ : Tuple = image.size
            else:
                UpperCAmelCase__ , UpperCAmelCase__ : Any = image.shape[1], image.shape[2]
            # Scale so the shorter side equals size["shortest_edge"],
            # preserving aspect ratio.
            if w < h:
                UpperCAmelCase__ : Dict = int(self.size["""shortest_edge"""] * h / w )
                UpperCAmelCase__ : Tuple = self.size["""shortest_edge"""]
            elif w > h:
                UpperCAmelCase__ : Dict = self.size["""shortest_edge"""]
                UpperCAmelCase__ : Optional[int] = int(self.size["""shortest_edge"""] * w / h )
            else:
                UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""]
                UpperCAmelCase__ : Optional[Any] = self.size["""shortest_edge"""]
        else:
            UpperCAmelCase__ : int = []
            for image in image_inputs:
                UpperCAmelCase__ , UpperCAmelCase__ : int = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            UpperCAmelCase__ : Optional[Any] = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[0] )[0]
            UpperCAmelCase__ : Dict = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[1] )[1]
        return expected_height, expected_width

    def __UpperCAmelCase ( self ):
        # Fabricate random model outputs for the post-processing tests.
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ):
__lowerCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__lowerCamelCase = image_processing_class
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Optional[int] = OneFormerImageProcessorTester(self )
@property
def __UpperCAmelCase ( self ):
return self.image_processing_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """ignore_index""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """class_info_file""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """num_text""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """repo_path""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """metadata""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_reduce_labels""" ) )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
# Initialize image_processor
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
UpperCAmelCase__ : str = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self ):
# Initialize image_processor
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Optional[Any] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processing_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self ):
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Optional[Any] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase="np" ):
UpperCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase__ : Union[str, Any] = self.image_processing_tester.num_labels
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase )
if with_segmentation_maps:
UpperCAmelCase__ : int = num_labels
if is_instance_map:
UpperCAmelCase__ : Optional[int] = list(range(_lowerCAmelCase ) ) * 2
UpperCAmelCase__ : Dict = dict(enumerate(_lowerCAmelCase ) )
UpperCAmelCase__ : int = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase__ : List[str] = [Image.fromarray(_lowerCAmelCase ) for annotation in annotations]
UpperCAmelCase__ : List[Any] = image_processor(
_lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , _lowerCAmelCase , return_tensors="""pt""" , instance_id_to_semantic_id=_lowerCAmelCase , pad_and_return_pixel_mask=_lowerCAmelCase , )
return inputs
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
def common(_lowerCAmelCase=False , _lowerCAmelCase=None ):
UpperCAmelCase__ : List[str] = self.comm_get_image_processor_inputs(
with_segmentation_maps=_lowerCAmelCase , is_instance_map=_lowerCAmelCase , segmentation_type=_lowerCAmelCase )
UpperCAmelCase__ : Any = inputs["""mask_labels"""]
UpperCAmelCase__ : Optional[Any] = inputs["""class_labels"""]
UpperCAmelCase__ : Any = inputs["""pixel_values"""]
UpperCAmelCase__ : Any = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_lowerCAmelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_lowerCAmelCase )
common(is_instance_map=_lowerCAmelCase , segmentation_type="""pil""" )
common(is_instance_map=_lowerCAmelCase , segmentation_type="""pil""" )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = np.zeros((20, 50) )
UpperCAmelCase__ : List[Any] = 1
UpperCAmelCase__ : List[Any] = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : str = binary_mask_to_rle(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def test_post_process_semantic_segmentation( self ):
    """post_process_semantic_segmentation returns one (H, W) semantic map per batch
    item, resized to ``target_sizes`` when those are provided.

    Reconstructed from a machine-mangled block in which the assignments to
    ``fature_extractor``/``outputs``/``segmentation``/``target_sizes`` had been
    renamed away, leaving the reads below unresolved (NameError).
    """
    image_processor = self.image_processing_class(
        num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
    outputs = self.image_processing_tester.get_fake_oneformer_outputs()
    segmentation = image_processor.post_process_semantic_segmentation(outputs )
    # one map per batch element, at the tester's native resolution
    self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
    self.assertEqual(
        segmentation[0].shape , (
            self.image_processing_tester.height,
            self.image_processing_tester.width,
        ) , )
    # with explicit target sizes, each map is resized accordingly
    target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
    segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
    self.assertEqual(segmentation[0].shape , target_sizes[0] )
def test_post_process_instance_segmentation( self ):
    """post_process_instance_segmentation returns, per batch item, a dict with a
    full-resolution ``segmentation`` map and a ``segments_info`` list.

    Reconstructed from a machine-mangled block whose local bindings
    (``image_processor``, ``outputs``, ``segmentation``) and the expected
    ``list`` type had been destroyed.
    """
    image_processor = self.image_processing_class(
        num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
    outputs = self.image_processing_tester.get_fake_oneformer_outputs()
    # threshold=0 keeps every predicted segment
    segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
    self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
    for el in segmentation:
        self.assertTrue("segmentation" in el )
        self.assertTrue("segments_info" in el )
        self.assertEqual(type(el["segments_info"] ) , list )
        self.assertEqual(
            el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def test_post_process_panoptic_segmentation( self ):
    """post_process_panoptic_segmentation returns, per batch item, a dict with a
    full-resolution ``segmentation`` map and a ``segments_info`` list.

    Reconstructed from a machine-mangled block whose local bindings
    (``image_processor``, ``outputs``, ``segmentation``) and the expected
    ``list`` type had been destroyed.
    """
    image_processor = self.image_processing_class(
        num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
    outputs = self.image_processing_tester.get_fake_oneformer_outputs()
    # threshold=0 keeps every predicted segment
    segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
    self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
    for el in segmentation:
        self.assertTrue("segmentation" in el )
        self.assertTrue("segments_info" in el )
        self.assertEqual(type(el["segments_info"] ) , list )
        self.assertEqual(
            el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 79 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    """Builds a tiny ASTConfig plus random spectrogram inputs for the AST unit tests.

    Reconstructed from a machine-mangled block: the ``__init__`` signature used the
    same name for every parameter (a SyntaxError), the ``self.*`` attribute bindings
    had been renamed to locals, all four methods shared one name (shadowing each
    other), and the class name did not match the ``ASTModelTester(self)`` call in the
    test class below. Parameter names/defaults were restored from the attribute
    reads in the method bodies.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return ``(config, input_values, labels)`` with random spectrogram inputs."""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        """Build the small ASTConfig used by all tests in this file."""
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            # presumably False in the original (the value was mangled away) — TODO confirm
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        """Run a forward pass and check the last_hidden_state shape."""
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the shared ModelTesterMixin machinery."""
        config, input_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    # NOTE(review): machine-mangled block. The class name, the two `_lowercase` bases
    # (presumably ModelTesterMixin and PipelineTesterMixin) and the `snake_case__`
    # attribute names (presumably `all_model_classes`, `pipeline_model_mapping`, and
    # four disabled-feature flags) were all destroyed, every method was renamed to the
    # same identifier (so later defs shadow earlier ones and unittest discovers no
    # `test_*` methods), and several bodies read names (`pipeline_test_casse_name`,
    # `config`, `model`, `x`, `signature`, `arg_names`, `model_name`) whose bindings
    # were renamed away. Restore from the upstream AST test file before relying on it.

    # presumably `all_model_classes` — TODO confirm
    snake_case__ : List[Any] = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    # presumably `pipeline_model_mapping` — TODO confirm
    snake_case__ : Tuple = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    # four feature flags disabled for AST; original names destroyed by the renaming
    snake_case__ : Any = False
    snake_case__ : List[Any] = False
    snake_case__ : Optional[Any] = False
    snake_case__ : Optional[Any] = False

    def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ):
        """Decide whether a given pipeline test should be skipped for this model.

        NOTE(review): the duplicate parameter names are a SyntaxError left by the
        mangling, and `pipeline_test_casse_name` (typo kept from upstream) is unbound.
        """
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Set up the model tester and the shared ConfigTester."""
        _lowerCamelCase : Optional[int] = ASTModelTester(self )
        # NOTE(review): `config_class=__lowerCAmelCase` is unbound (presumably ASTConfig)
        _lowerCamelCase : Any = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )

    def SCREAMING_SNAKE_CASE ( self : str ):
        """Run the common config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Skipped: AST consumes spectrograms, not token embeddings."""
        pass

    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Check input/output embedding accessors for every model class."""
        _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NOTE(review): `__lowerCAmelCase` below is unbound (was presumably `config`)
            _lowerCamelCase : Dict = model_class(__lowerCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            _lowerCamelCase : List[str] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Check that forward()'s first positional argument is `input_values`."""
        _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : str = model_class(__lowerCAmelCase )
            _lowerCamelCase : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : Any = [*signature.parameters.keys()]
            _lowerCamelCase : str = ['''input_values''']
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Build config+inputs and run the shared model check."""
        _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Smoke-test loading the first pretrained AST checkpoint from the Hub."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Union[str, Any] = ASTModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def prepare_audio():
    """Download a sample FLAC file from the Hub and return ``(waveform, sampling_rate)``.

    Reconstructed from a machine-mangled block: the ``filepath``/``audio``/
    ``sampling_rate`` bindings were renamed away and the load call referenced an
    undefined ``A_``. The name ``prepare_audio`` is what the integration test below
    actually calls.
    """
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained AudioSet AST checkpoint.

    Reconstructed from a machine-mangled block: both methods shared one name
    (shadowing each other) even though the test reads ``self.default_feature_extractor``,
    and the locals (``model``, ``inputs``, ``outputs``, ``expected_shape``,
    ``expected_slice``) plus the ``torch_device`` targets of ``.to(...)`` were destroyed.
    """

    @cached_property
    def default_feature_extractor(self):
        """Feature extractor matching the checkpoint, or None when torchaudio is missing."""
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        """Run a real forward pass and compare the first logits against known values."""
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='''pt''' ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits (527 AudioSet classes)
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 83 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Scripts under examples/by_feature that are excluded from the one-to-one comparison
# against the complete examples below (they have no plain counterpart or need special
# infrastructure to run). The class below reads this as EXCLUDE_EXAMPLES; the mangled
# assignment bound it to a throwaway name, which made that read a NameError.
EXCLUDE_EXAMPLES = [
    """cross_validation.py""",
    """gradient_accumulation.py""",
    """local_sgd.py""",
    """multi_process_metrics.py""",
    """memory.py""",
    """automatic_gradient_accumulation.py""",
    """fsdp_with_peak_mem_tracking.py""",
    """deepspeed_with_config_support.py""",
    """megatron_lm_gpt_pretraining.py""",
]
class __UpperCamelCase ( unittest.TestCase ):
    # NOTE(review): machine-mangled block — presumably accelerate's ExamplesTests.
    # All four parameters of the first method were renamed to `_lowerCAmelCase`
    # (duplicate argument names: a SyntaxError), and the names read in the bodies
    # (`item_path`, `parser_only`, `special_strings`, `diff`) lost their bindings.
    # Restore from the upstream accelerate test file rather than patching piecemeal.

    def _a ( self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : bool , _lowerCAmelCase : str = None , _lowerCAmelCase : list = None ) -> int:
        """Diff each `examples/by_feature` script against the matching complete example
        and assert the remaining difference (after stripping `special_strings`) is empty.

        NOTE(review): parameters were presumably
        (complete_file_name, parser_only, secondary_filename, special_strings).
        """
        __lowercase = None
        __lowercase = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
        __lowercase = os.path.abspath("""examples""" )
        for item in os.listdir(_lowerCAmelCase ):
            if item not in EXCLUDE_EXAMPLES:
                __lowercase = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
                if os.path.isfile(_lowerCAmelCase ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=_lowerCAmelCase , feature_script=_lowerCAmelCase , tested_section="""main()""" if parser_only else """training_function()""" , ):
                        __lowercase = compare_against_test(
                            os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
                        __lowercase = """\n""".join(_lowerCAmelCase )
                        if special_strings is not None:
                            for string in special_strings:
                                __lowercase = diff.replace(_lowerCAmelCase , """""" )
                        # an empty diff means the feature script matches the complete example
                        self.assertEqual(_lowerCAmelCase , """""" )

    def _a ( self : str ) -> List[Any]:
        """Compare the complete NLP example in both modes.

        NOTE(review): the boolean arguments were destroyed — presumably True then False.
        """
        self.one_complete_example("""complete_nlp_example.py""" , _lowerCAmelCase )
        self.one_complete_example("""complete_nlp_example.py""" , _lowerCAmelCase )

    def _a ( self : Dict ) -> Tuple:
        """Compare the complete CV example, ignoring the tracking-specific lines in
        `special_strings` (whitespace-sensitive source fragments)."""
        __lowercase = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
        __lowercase = [
            """ """ * 16 + """{\n\n""",
            """ """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
            """ """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
            """ """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
            """ """ * 20 + """\"epoch\": epoch,\n\n""",
            """ """ * 16 + """},\n\n""",
            """ """ * 16 + """step=epoch,\n""",
            """ """ * 12,
            """ """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
        ]
        self.one_complete_example("""complete_cv_example.py""" , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        self.one_complete_example("""complete_cv_example.py""" , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class __UpperCamelCase ( _lowerCAmelCase ):
    # NOTE(review): machine-mangled block — presumably accelerate's FeatureExamplesTests.
    # The base class `_lowerCAmelCase` is unbound (presumably TempDirTestCase), and the
    # `__lowercase` assignments in setUpClass should presumably bind `cls._tmpdir`,
    # `cls.configPath` and `cls._launch_args` (which later code reads). Restore from
    # the upstream accelerate test file rather than patching piecemeal.

    # presumably the TempDirTestCase `clear_on_setup` flag — TODO confirm
    __snake_case :List[Any] = False

    @classmethod
    def _a ( cls : Optional[Any] ) -> Union[str, Any]:
        """Create one shared accelerate launch config in a temp dir for all tests."""
        super().setUpClass()
        __lowercase = tempfile.mkdtemp()
        __lowercase = os.path.join(cls._tmpdir , """default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        __lowercase = ["""accelerate""", """launch""", """--config_file""", cls.configPath]

    @classmethod
    def _a ( cls : Dict ) -> Tuple:
        """Remove the shared temp dir."""
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )

    def _a ( self : str ) -> Optional[Any]:
        """Epoch checkpointing should create an epoch_0 directory."""
        __lowercase = F'\n    examples/by_feature/checkpointing.py\n    --checkpointing_steps epoch\n    --output_dir {self.tmpdir}\n    '.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )

    def _a ( self : Optional[Any] ) -> Optional[int]:
        """Step checkpointing should create a step_2 directory."""
        __lowercase = F'\n    examples/by_feature/checkpointing.py\n    --checkpointing_steps 1\n    --output_dir {self.tmpdir}\n    '.split()
        __lowercase = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )

    def _a ( self : int ) -> Dict:
        """Resuming from an epoch checkpoint skips epoch 0 in the output."""
        __lowercase = F'\n    examples/by_feature/checkpointing.py\n    --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n    '.split()
        __lowercase = run_command(self._launch_args + testargs , return_stdout=_lowerCAmelCase )
        self.assertNotIn("""epoch 0:""" , _lowerCAmelCase )
        self.assertIn("""epoch 1:""" , _lowerCAmelCase )

    def _a ( self : Tuple ) -> Union[str, Any]:
        """Resuming from a step checkpoint: epoch 0 only reappears on a single process."""
        __lowercase = F'\n    examples/by_feature/checkpointing.py\n    --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n    '.split()
        __lowercase = run_command(self._launch_args + testargs , return_stdout=_lowerCAmelCase )
        if torch.cuda.is_available():
            __lowercase = torch.cuda.device_count()
        else:
            __lowercase = 1
        if num_processes > 1:
            self.assertNotIn("""epoch 0:""" , _lowerCAmelCase )
            self.assertIn("""epoch 1:""" , _lowerCAmelCase )
        else:
            self.assertIn("""epoch 0:""" , _lowerCAmelCase )
            self.assertIn("""epoch 1:""" , _lowerCAmelCase )

    @slow
    def _a ( self : List[Any] ) -> Dict:
        """Cross-validation example should reach at least 75% accuracy on real data."""
        __lowercase = """
            examples/by_feature/cross_validation.py
            --num_folds 2
            """.split()
        with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
            __lowercase = run_command(self._launch_args + testargs , return_stdout=_lowerCAmelCase )
            # the script prints dict-like result lines; take the last one with accuracy
            __lowercase = re.findall("""({.+})""" , _lowerCAmelCase )
            __lowercase = [r for r in results if """accuracy""" in r][-1]
            __lowercase = ast.literal_eval(_lowerCAmelCase )
            self.assertGreaterEqual(results["""accuracy"""] , 0.75 )

    def _a ( self : List[Any] ) -> int:
        """Smoke-test the multi_process_metrics example."""
        __lowercase = ["""examples/by_feature/multi_process_metrics.py"""]
        run_command(self._launch_args + testargs )

    @require_trackers
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def _a ( self : Optional[int] ) -> str:
        """Tracking example should create a tracking/ dir under the project dir."""
        with tempfile.TemporaryDirectory() as tmpdir:
            __lowercase = F'\n    examples/by_feature/tracking.py\n    --with_tracking\n    --project_dir {tmpdir}\n    '.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , """tracking""" ) ) )

    def _a ( self : int ) -> List[Any]:
        """Smoke-test the gradient_accumulation example."""
        __lowercase = ["""examples/by_feature/gradient_accumulation.py"""]
        run_command(self._launch_args + testargs )

    def _a ( self : Optional[Any] ) -> Optional[int]:
        """Smoke-test the local_sgd example."""
        __lowercase = ["""examples/by_feature/local_sgd.py"""]
        run_command(self._launch_args + testargs )
| 80 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Run simulated annealing from ``search_prob`` and return the best state found.

    The mangled original gave every parameter the same name (a SyntaxError); the
    parameter names are restored from the references in the body, and the function
    name from the call sites in the ``__main__`` block.

    Args:
        search_prob: starting SearchProblem-like state; must expose ``score()``,
            ``get_neighbors()`` and ``x``/``y`` coordinates.
        find_max: maximize the score when True, minimize when False.
        max_x/min_x/max_y/min_y: rectangular bounds; out-of-bounds neighbors are skipped.
        visualization: plot score vs. iteration with matplotlib when True.
        start_temperate: initial temperature.
        rate_of_decrease: fractional temperature decay applied each iteration.
        threshold_temp: stop once the temperature drops below this value.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                # accept a worse neighbor with probability e^(change / T)
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('''Iterations''')
        plt.ylabel('''Function values''')
        plt.show()
    return best_state
if __name__ == "__main__":
def snake_case_ ( A_ : int, A_ : Tuple ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def snake_case_ ( A_ : Optional[int], A_ : List[Any] ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
| 83 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Shared fixture builder for the ChineseCLIP image-processor tests below.

    Reconstructed from a machine-mangled block: the ``self.*`` attribute bindings in
    ``__init__`` had been renamed to locals (so ``prepare_image_processor_dict`` read
    attributes that were never set), the parameter names were destroyed, and
    ``np.uint8`` was garbled to ``np.uinta``. The class and method names are fixed by
    the call sites in the test classes below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Build a batch of random channel-first test images.

        Returns PIL images by default, numpy arrays when ``numpify`` is set, or
        torch tensors when ``torchify`` is set (the two are mutually exclusive).
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class a (_lowerCAmelCase , unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor on standard 3-channel inputs.

    NOTE(review): machine-mangled block — the class name, the base `_lowerCAmelCase`
    (presumably ImageProcessingSavingTestMixin), the attribute name `__UpperCAmelCase`
    (presumably `image_processing_class`) and the `__snake_case` local bindings
    (`image_processing`, `image_inputs`, `encoded_images`) were destroyed; every
    `lowerCamelCase` argument below is unbound (destroyed booleans / locals).
    Restore from the upstream ChineseCLIP test file before relying on behavior.
    """

    # presumably `image_processing_class` — TODO confirm
    __UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None

    def __snake_case ( self : int ) -> str:
        # setUp: builds the shared tester (`do_center_crop=lowerCamelCase` lost its value,
        # presumably True)
        __snake_case : Optional[Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=lowerCamelCase )

    @property
    def __snake_case ( self : List[str] ) -> str:
        # presumably the `image_processor_dict` property
        return self.image_processor_tester.prepare_image_processor_dict()

    def __snake_case ( self : Union[str, Any] ) -> Optional[Any]:
        # checks the processor exposes all expected config attributes
        __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(lowerCamelCase , "size" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_center_crop" ) )
        self.assertTrue(hasattr(lowerCamelCase , "center_crop" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
        self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
        self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_convert_rgb" ) )

    def __snake_case ( self : List[Any] ) -> Dict:
        # from_dict honors defaults and explicit size/crop_size overrides
        __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        __snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def __snake_case ( self : str ) -> int:
        # intentionally empty — skips an inherited test hook
        pass

    def __snake_case ( self : Optional[int] ) -> Any:
        # Initialize image_processing
        __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __snake_case : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , Image.Image )
        # Test not batched input
        __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        __snake_case : Dict = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __snake_case ( self : Optional[Any] ) -> Optional[int]:
        # Initialize image_processing
        __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __snake_case : Any = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , np.ndarray )
        # Test not batched input
        __snake_case : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        __snake_case : List[str] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def __snake_case ( self : Any ) -> Dict:
        # Initialize image_processing
        __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __snake_case : str = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , torch.Tensor )
        # Test not batched input
        __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        __snake_case : Dict = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
@require_torch
@require_vision
class a (_lowerCAmelCase , unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor on 4-channel inputs (RGB conversion path).

    NOTE(review): machine-mangled block — same corruption pattern as the class above:
    the base `_lowerCAmelCase`, attribute names, `__snake_case` local bindings and the
    `lowerCamelCase` arguments are all destroyed. The unnamed `= 3` in setUp is
    presumably `self.expected_encoded_image_num_channels = 3` (read by the last test).
    """

    # presumably `image_processing_class` — TODO confirm
    __UpperCAmelCase : Optional[Any] = ChineseCLIPImageProcessor if is_vision_available() else None

    def __snake_case ( self : Tuple ) -> Optional[int]:
        # setUp: 4-channel tester; `do_center_crop=lowerCamelCase` lost its value
        __snake_case : Optional[int] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=lowerCamelCase )
        __snake_case : Any = 3

    @property
    def __snake_case ( self : List[Any] ) -> Dict:
        # presumably the `image_processor_dict` property
        return self.image_processor_tester.prepare_image_processor_dict()

    def __snake_case ( self : List[str] ) -> Union[str, Any]:
        # checks the processor exposes all expected config attributes
        __snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(lowerCamelCase , "size" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_center_crop" ) )
        self.assertTrue(hasattr(lowerCamelCase , "center_crop" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
        self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
        self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_convert_rgb" ) )

    def __snake_case ( self : int ) -> Tuple:
        # intentionally empty — skips an inherited test hook
        pass

    def __snake_case ( self : List[Any] ) -> Any:
        # Initialize image_processing
        __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __snake_case : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , Image.Image )
        # Test not batched input: 4-channel input is converted down to RGB (3 channels)
        __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        __snake_case : List[Any] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 81 |
"""simple docstring"""
from collections import namedtuple
lowerCAmelCase__ = namedtuple('''from_to''', '''from_ to''')
lowerCAmelCase__ = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_0_1, 1000),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7),
'''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def snake_case_ ( A_ : float, A_ : str, A_ : str ):
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ''', '''.join(A_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ''', '''.join(A_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()  # pin RNG / backend settings so the recorded image slices below are reproducible
class lowercase__ ( unittest.TestCase ):
    """Fast tests for ``StableDiffusionUpscalePipeline`` built from tiny dummy sub-models.

    NOTE(review): the source is name-mangled — every member is called
    ``lowercase__`` (later definitions shadow earlier ones at class-creation
    time) and locals are all ``UpperCAmelCase_`` while subsequent statements
    read descriptive names (``batch_size``, ``model``, ``sd_pipe`` …) that are
    never bound. Recover the real names from the original diffusers test file
    before relying on runtime behaviour.
    """

    def lowercase__ ( self : Optional[Any] ) -> int:
        # Release Python references and cached CUDA memory after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowercase__ ( self : List[str] ) -> Optional[int]:
        # Deterministic (1, 3, 32, 32) float tensor used as the low-res input.
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = 3
        UpperCAmelCase_ = (32, 32)
        UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCAmelCase )
        return image

    @property
    def lowercase__ ( self : Optional[int] ) -> int:
        # Tiny conditional UNet; 7 input channels = 4 latent + 3 low-res image channels.
        torch.manual_seed(0 )
        UpperCAmelCase_ = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=_UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model

    @property
    def lowercase__ ( self : List[Any] ) -> List[str]:
        # Tiny VAE matching the UNet's 4 latent channels.
        torch.manual_seed(0 )
        UpperCAmelCase_ = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    @property
    def lowercase__ ( self : int ) -> Any:
        # Tiny CLIP text encoder used for prompt conditioning.
        torch.manual_seed(0 )
        UpperCAmelCase_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        return CLIPTextModel(_UpperCAmelCase )

    def lowercase__ ( self : str ) -> Any:
        # End-to-end CPU smoke test: output resolution is 4x the input, and a
        # fixed 3x3 corner slice must match the recorded reference; the
        # return_dict=False path must agree with the default path.
        UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ = self.dummy_cond_unet_upscale
        UpperCAmelCase_ = DDPMScheduler()
        UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
        UpperCAmelCase_ = self.dummy_vae
        UpperCAmelCase_ = self.dummy_text_encoder
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        UpperCAmelCase_ = StableDiffusionUpscalePipeline(
            unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=350 , )
        UpperCAmelCase_ = sd_pipe.to(_UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        UpperCAmelCase_ = "A painting of a squirrel eating a burger"
        UpperCAmelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
        UpperCAmelCase_ = sd_pipe(
            [prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        UpperCAmelCase_ = output.images
        UpperCAmelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
        UpperCAmelCase_ = sd_pipe(
            [prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=_UpperCAmelCase , )[0]
        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
        UpperCAmelCase_ = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        UpperCAmelCase_ = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase__ ( self : Dict ) -> List[Any]:
        # Batch handling: a 2-prompt batch and num_images_per_prompt=2 must both
        # yield two output images.
        UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ = self.dummy_cond_unet_upscale
        UpperCAmelCase_ = DDPMScheduler()
        UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
        UpperCAmelCase_ = self.dummy_vae
        UpperCAmelCase_ = self.dummy_text_encoder
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        UpperCAmelCase_ = StableDiffusionUpscalePipeline(
            unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=350 , )
        UpperCAmelCase_ = sd_pipe.to(_UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        UpperCAmelCase_ = "A painting of a squirrel eating a burger"
        UpperCAmelCase_ = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        UpperCAmelCase_ = output.images
        assert image.shape[0] == 2
        UpperCAmelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
        UpperCAmelCase_ = sd_pipe(
            [prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        UpperCAmelCase_ = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def lowercase__ ( self : List[Any] ) -> List[str]:
        # fp16 path on GPU: only checks that the pipeline runs and the output
        # resolution is 4x the input (no slice comparison at half precision).
        UpperCAmelCase_ = self.dummy_cond_unet_upscale
        UpperCAmelCase_ = DDPMScheduler()
        UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" )
        UpperCAmelCase_ = self.dummy_vae
        UpperCAmelCase_ = self.dummy_text_encoder
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("RGB" ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        UpperCAmelCase_ = unet.half()
        UpperCAmelCase_ = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        UpperCAmelCase_ = StableDiffusionUpscalePipeline(
            unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=350 , )
        UpperCAmelCase_ = sd_pipe.to(_UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        UpperCAmelCase_ = "A painting of a squirrel eating a burger"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = sd_pipe(
            [prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="np" , ).images
        UpperCAmelCase_ = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
    """GPU integration tests against the real stabilityai/stable-diffusion-x4-upscaler checkpoint.

    NOTE(review): name-mangled source — all members are named ``lowercase__``
    (later defs shadow earlier ones) and locals ``UpperCAmelCase_`` while the
    code reads descriptive names (``pipe``, ``output`` …); confirm against the
    original diffusers test file.
    """

    def lowercase__ ( self : Optional[int] ) -> int:
        # Release references and CUDA cache between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase__ ( self : List[Any] ) -> Optional[int]:
        # Full-precision upscale of a reference cat image; output must match a
        # stored numpy reference within 1e-3.
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(_UpperCAmelCase )
        pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type="np" , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-3

    def lowercase__ ( self : str ) -> List[Any]:
        # Half-precision variant of the test above; wider tolerance (5e-1).
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
            _UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type="np" , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def lowercase__ ( self : int ) -> Optional[Any]:
        # Memory ceiling check: fp16 + attention slicing + sequential CPU
        # offload must keep peak CUDA allocation under ~2.9 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
            _UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , output_type="np" , )
        UpperCAmelCase_ = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 82 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( _lowercase):
    """Model tester for DebertaV2: builds tiny configs/inputs and runs shape
    checks for every task head (base model, MLM, sequence/token classification,
    QA, multiple choice).

    NOTE(review): name-mangled source — several signatures below repeat the
    parameter name ``__lowerCAmelCase`` (duplicate argument names are a
    SyntaxError in real Python) and all locals are ``_lowerCamelCase`` while
    the code reads descriptive names. Positional parameter meaning must be
    recovered from the original transformers test file.
    """

    def __init__( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : int=1_3 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=9_9 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Union[str, Any]=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Optional[int]=3_7 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : int=5_1_2 , __lowerCAmelCase : Tuple=1_6 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any="None" , __lowerCAmelCase : str=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Optional[Any]=None , ):
        # Record the hyper-parameters of the tiny test model.
        _lowerCamelCase : Dict = parent
        _lowerCamelCase : Union[str, Any] = batch_size
        _lowerCamelCase : Optional[Any] = seq_length
        _lowerCamelCase : Optional[Any] = is_training
        _lowerCamelCase : Dict = use_input_mask
        _lowerCamelCase : Tuple = use_token_type_ids
        _lowerCamelCase : Optional[Any] = use_labels
        _lowerCamelCase : List[str] = vocab_size
        _lowerCamelCase : Any = hidden_size
        _lowerCamelCase : int = num_hidden_layers
        _lowerCamelCase : Optional[Any] = num_attention_heads
        _lowerCamelCase : int = intermediate_size
        _lowerCamelCase : Optional[int] = hidden_act
        _lowerCamelCase : int = hidden_dropout_prob
        _lowerCamelCase : Dict = attention_probs_dropout_prob
        _lowerCamelCase : List[Any] = max_position_embeddings
        _lowerCamelCase : str = type_vocab_size
        _lowerCamelCase : List[Any] = type_sequence_label_size
        _lowerCamelCase : List[Any] = initializer_range
        _lowerCamelCase : Optional[int] = num_labels
        _lowerCamelCase : Any = num_choices
        _lowerCamelCase : int = relative_attention
        _lowerCamelCase : Union[str, Any] = position_biased_input
        _lowerCamelCase : str = pos_att_type
        _lowerCamelCase : Tuple = scope

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        # Build random input ids, optional masks/token types, and labels for
        # every task; returns them with a fresh config.
        _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowerCamelCase : List[Any] = None
        if self.use_input_mask:
            _lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        _lowerCamelCase : Any = None
        if self.use_token_type_ids:
            _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowerCamelCase : Any = None
        _lowerCamelCase : int = None
        _lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            _lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        _lowerCamelCase : int = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Tiny DebertaV2 config assembled from the stored hyper-parameters.
        return DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )

    def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str ):
        # Loss must be a scalar (empty shape).
        self.parent.assertListEqual(list(result.loss.size() ) , [] )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ):
        # Base model: hidden states have shape (batch, seq, hidden) regardless
        # of which optional inputs are supplied.
        _lowerCamelCase : List[str] = DebertaVaModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
        _lowerCamelCase : str = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
        _lowerCamelCase : List[Any] = model(__lowerCAmelCase )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple ):
        # MLM head: logits over the full vocabulary per token.
        _lowerCamelCase : Tuple = DebertaVaForMaskedLM(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
        # Sequence classification: one logit vector per example plus scalar loss.
        _lowerCamelCase : Optional[Any] = self.num_labels
        _lowerCamelCase : Dict = DebertaVaForSequenceClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ):
        # Token classification: per-token label logits.
        _lowerCamelCase : Optional[int] = self.num_labels
        _lowerCamelCase : Tuple = DebertaVaForTokenClassification(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
        # Extractive QA: start/end logits per token.
        _lowerCamelCase : List[str] = DebertaVaForQuestionAnswering(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Tuple = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
        # Multiple choice: inputs are tiled across the choice dimension before
        # the forward pass; logits are (batch, num_choices).
        _lowerCamelCase : Optional[int] = DebertaVaForMultipleChoice(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : List[Any] = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        # Repackage prepare_config_and_inputs() into the (config, inputs_dict)
        # pair expected by the common test mixin.
        _lowerCamelCase : Any = self.prepare_config_and_inputs()
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : Union[str, Any] = config_and_inputs
        _lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    """Common test-suite wiring for DebertaV2: registers the model classes and
    pipeline mapping consumed by the shared ModelTester/Pipeline mixins.

    NOTE(review): all class attributes below share the mangled name
    ``snake_case__`` — each assignment shadows the previous one at
    class-creation time; in the original file these are distinct attributes
    (all_model_classes, pipeline_model_mapping, and boolean feature flags).
    Confirm before relying on runtime behaviour.
    """

    # Task-specific model classes exercised by the common tests.
    snake_case__ : int = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline test mixin.
    snake_case__ : int = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the common mixins (mangled names, see class note).
    snake_case__ : List[str] = True
    snake_case__ : List[Any] = False
    snake_case__ : int = False
    snake_case__ : Optional[Any] = False
    snake_case__ : str = False

    def SCREAMING_SNAKE_CASE ( self : int ):
        # Instantiate the model tester and config tester used by every test below.
        _lowerCamelCase : List[str] = DebertaVaModelTester(self )
        _lowerCamelCase : Any = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self : int ):
        # Base model forward-pass shape check.
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Sequence-classification head check.
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Masked-LM head check.
        _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : str ):
        # Question-answering head check.
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : int ):
        # Token-classification head check.
        _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        # Multiple-choice head check.
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*__lowerCAmelCase )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        # Smoke test: the first published checkpoint loads successfully.
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Tuple = DebertaVaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase):
    """Integration tests against the real microsoft/deberta-v2-xlarge checkpoint."""

    @unittest.skip(reason='''Model not available yet''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        # Skipped placeholder kept for parity with other model test files.
        pass

    @slow
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        # Forward pass on a fixed token sequence: a 3x3 slice of the hidden
        # states must match recorded reference values to atol=1e-4.
        # NOTE(review): `__lowerCAmelCase` is a mangled free name below —
        # positionally it stands for the input ids / attention mask / expected
        # tensor from this method; confirm against the original test file.
        _lowerCamelCase : Tuple = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
        _lowerCamelCase : List[str] = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        _lowerCamelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            _lowerCamelCase : Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
        # compare the actual values for a slice.
        _lowerCamelCase : Union[str, Any] = torch.tensor(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 83 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# MT5 reuses the T5 tokenizer classes; when sentencepiece/tokenizers are not
# installed, dummy objects (which raise a helpful ImportError on use) are
# substituted so the module still imports.
# NOTE(review): the aliases below are mangled — `UpperCAmelCase` is reassigned
# repeatedly, and the bottom of the file reads `_import_structure`,
# `MTaTokenizer` and `MTaTokenizerFast`, which are never bound under these
# names; confirm against the original transformers mt5/__init__.py.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

UpperCAmelCase = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

UpperCAmelCase = TaTokenizerFast

# Lazy-import structure: module name -> list of public symbols.
UpperCAmelCase = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}

# Register PyTorch model symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase = [
        '''MT5EncoderModel''',
        '''MT5ForConditionalGeneration''',
        '''MT5ForQuestionAnswering''',
        '''MT5Model''',
        '''MT5PreTrainedModel''',
        '''MT5Stack''',
    ]

# TensorFlow model symbols.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']

# Flax model symbols.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']

if TYPE_CHECKING:
    # Eager imports for static type checkers only; at runtime the lazy module
    # below performs the imports on first attribute access.
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
    import sys

    # Replace this module in sys.modules with a lazy proxy; tokenizer classes
    # are always available eagerly via extra_objects.
    UpperCAmelCase = _LazyModule(
        __name__,
        globals()['''__file__'''],
        _import_structure,
        extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
        module_spec=__spec__,
    )
| 84 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# Fixtures for the hub-cache tests below: test repo id, its expected local
# cache directory, and a pinned commit hash.
# NOTE(review): all three constants are mangled to the same name
# ``lowerCAmelCase__`` — later assignments shadow earlier ones; in the
# original file they presumably have distinct names. Confirm.
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert'''
lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowerCAmelCase__ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = cached_file(__lowerCAmelCase , __lowerCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__lowerCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) )
with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
_lowerCamelCase : Optional[int] = f.read()
self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
self.assertTrue(os.path.isfile(__lowerCAmelCase ) )
# File is cached at the same place the second time.
_lowerCamelCase : Tuple = cached_file(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
# Using a specific revision to test the full commit hash.
_lowerCamelCase : Dict = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''9b8c223''' )
self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
_lowerCamelCase : Optional[int] = cached_file('''tiny-random-bert''' , __lowerCAmelCase )
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
_lowerCamelCase : str = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''aaaa''' )
with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
_lowerCamelCase : int = cached_file(__lowerCAmelCase , '''conf''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
_lowerCamelCase : Dict = cached_file(__lowerCAmelCase , '''conf''' )
with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
_lowerCamelCase : List[Any] = f.read()
self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''.no_exist''' , __lowerCAmelCase , '''conf''' ) ) )
_lowerCamelCase : str = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
self.assertIsNone(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = cached_file(__lowerCAmelCase , '''conf''' , local_files_only=__lowerCAmelCase , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
self.assertIsNone(__lowerCAmelCase )
_lowerCamelCase : Any = mock.Mock()
_lowerCamelCase : Optional[Any] = 5_0_0
_lowerCamelCase : Dict = {}
_lowerCamelCase : List[Any] = HTTPError
_lowerCamelCase : int = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__lowerCAmelCase ) as mock_head:
_lowerCamelCase : Union[str, Any] = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=__lowerCAmelCase )
self.assertIsNone(__lowerCAmelCase )
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """`get_file_from_repo` against remote Hub repositories.

        NOTE(review): the `__lowerCAmelCase` placeholders replaced the
        expected exception class and the config filename constant, and the
        name `config` read at the end was also lost to obfuscation — all
        must be restored before this test can run.
        """
        # A file that does not exist in an existing repo yields None.
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , __lowerCAmelCase )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase , revision='''ahaha''' )
        _lowerCamelCase : Dict = get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        _lowerCamelCase : Dict = json.loads(open(__lowerCAmelCase , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 7_6_8 )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """`get_file_from_repo` with a plain local directory instead of a repo id.

        NOTE(review): `__lowerCAmelCase` placeholders replaced the temp-dir
        and path variables, and `filename` is read below without being
        bound — the original local names were lost and must be restored.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Create an empty file inside the temporary directory.
            _lowerCamelCase : Any = Path(__lowerCAmelCase ) / '''a.txt'''
            filename.touch()
            # An existing file resolves to its full path; a missing one to None.
            self.assertEqual(get_file_from_repo(__lowerCAmelCase , '''a.txt''' ) , str(__lowerCAmelCase ) )
            self.assertIsNone(get_file_from_repo(__lowerCAmelCase , '''b.txt''' ) )
| 83 | 0 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of ``2**power`` (Project Euler 16).

    The original block named its parameter ``lowercase__`` while the body read
    ``power``, and the ``__main__`` guard called ``solution`` — both references
    are restored here.

    >>> solution(15)
    26
    """
    n = 2**power
    digit_sum = 0
    # Peel off the last decimal digit until nothing is left.
    while n:
        digit_sum, n = digit_sum + n % 10, n // 10
    return digit_sum


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 85 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __snake_case ( _lowercase):
    """Configuration class for a CvT (Convolutional vision Transformer) model.

    Each list-valued argument holds one value per stage (three stages by
    default). The original ``__init__`` declared every parameter with the same
    obfuscated name (a SyntaxError); the names are restored to match the
    attributes the body assigns.
    """

    # NOTE(review): upstream calls this attribute `model_type`; the obfuscated
    # name is kept because the base class `_lowercase` is not visible here.
    snake_case__ : str = "cvt"

    def __init__(
        self,
        num_channels=3,                                  # input image channels
        patch_sizes=[7, 3, 3],                           # conv-embedding kernel per stage
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[6_4, 1_9_2, 3_8_4],                   # hidden size per stage
        num_heads=[1, 3, 6],
        depth=[1, 2, 1_0],                               # number of blocks per stage
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],                  # only the last stage uses a CLS token
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 83 | 0 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of ``number`` as one line per term.

    The original block declared two parameters with the same obfuscated name
    (a SyntaxError) while the body read ``number``/``number_of_terms`` and the
    ``__main__`` guard called ``multiplication_table`` — all restored here.

    >>> print(multiplication_table(number=5, number_of_terms=2))
    5 * 1 = 5
    5 * 2 = 10
    """
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1 ,number_of_terms + 1 ) )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(_lowercase):
    """Output of the SDE-VE scheduler's predictor step.

    The original class was obfuscated to ``__snake_case`` with both fields
    named ``snake_case__``; the step method below constructs
    ``SdeVeOutput(prev_sample=..., prev_sample_mean=...)``, which grounds the
    restored names.
    """

    # Denoised sample for the previous timestep.
    prev_sample: torch.FloatTensor
    # Mean of `prev_sample` before the diffusion noise was added (useful as
    # the final, noise-free output).
    prev_sample_mean: torch.FloatTensor
class __snake_case ( _lowercase , _lowercase):
    """Variance-exploding (VE) stochastic differential equation scheduler.

    Implements the predictor (`step_pred`) and corrector (`step_correct`)
    updates of Song et al.'s score-based SDE sampling. The original block
    declared every method with the same obfuscated name (shadowing each
    other) and duplicated parameter names (a SyntaxError); names are restored
    from the references inside the bodies.
    """

    # Solver order exposed to pipelines.
    order: int = 1

    @register_to_config
    def __init__( self : str , num_train_timesteps: int = 2_0_0_0 , snr: float = 0.15 , sigma_min: float = 0.01 , sigma_max: float = 13_48.0 , sampling_eps: float = 1E-5 , correct_steps: int = 1 , ):
        """Store config; `snr` and `correct_steps` are read via `self.config`."""
        # Standard deviation of the initial noise distribution.
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps , sigma_min , sigma_max , sampling_eps )

    def scale_model_input( self : Any , sample: torch.FloatTensor , timestep: Optional[int] = None ):
        """No input scaling is needed for the VE SDE — return `sample` as-is."""
        return sample

    def set_timesteps( self : Optional[int] , num_inference_steps: int , sampling_eps: float = None , device: Union[str, torch.device] = None ):
        """Create the (continuous) timestep schedule, from 1 down to `sampling_eps`."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1 , sampling_eps , num_inference_steps , device=device )

    def set_sigmas( self : Union[str, Any] , num_inference_steps: int , sigma_min: float = None , sigma_max: float = None , sampling_eps: float = None ):
        """Set the geometric noise schedule used by the predictor/corrector."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps , sampling_eps )
        # Continuous sigmas along the timestep grid.
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        # Discrete sigmas, log-uniform between sigma_min and sigma_max.
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ) , math.log(sigma_max ) , num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def get_adjacent_sigma( self : Tuple , timesteps: torch.Tensor , t: torch.Tensor ):
        """Sigma of the previous discrete step (zero at the first step)."""
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )

    def step_pred( self : Optional[int] , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , generator: Optional[torch.Generator] = None , return_dict: bool = True , ):
        """Predictor update: one reverse-SDE Euler–Maruyama step."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        timestep = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )
        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps , timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape , layout=sample.layout , generator=generator , device=sample.device , dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample , prev_sample_mean=prev_sample_mean )

    def step_correct( self : List[Any] , model_output: torch.FloatTensor , sample: torch.FloatTensor , generator: Optional[torch.Generator] = None , return_dict: bool = True , ):
        """Corrector update: Langevin-dynamics step with SNR-derived step size."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape , layout=sample.layout , generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )

    def add_noise( self : Any , original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.FloatTensor , ):
        """Diffuse `original_samples` to the noise level of `timesteps`."""
        # Make sure sigmas and timesteps have the same device as original_samples.
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        # NOTE(review): the broadcast `[:, None, None, None]` assumes 4-D
        # (batch, channel, H, W) samples — confirm for other data shapes.
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__( self : Optional[int] ):
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
| 83 | 0 |
def solution(limit=28_123) -> int:
    """Project Euler 23: sum of all numbers <= `limit` that are NOT the sum of
    two abundant numbers.

    Fixes from the original block: the parameter was named ``lowercase_`` while
    the body read ``limit``; and ``abundants.add(lowercase_)`` wrongly added
    the limit instead of the current number ``n``. The ``__main__`` guard
    already called ``solution``, grounding the restored name.
    """
    # sum_divs[k] accumulates the sum of proper divisors of k (1 is always one).
    sum_divs = [1] * (limit + 1)
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i  # perfect square: count the repeated divisor once
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i  # divisor pair (i, k) with i < k
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )  # n is abundant (was `lowercase_` — wrong element)
        # n is counted only if no abundant pair a + (n - a) reaches it.
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res


if __name__ == "__main__":
    print(solution())
| 87 |
"""simple docstring"""
from torch import nn
def snake_case_ ( act_fn: str ):
    """Return a fresh torch activation module for the given name.

    Accepts "swish"/"silu" (aliases for SiLU), "mish" and "gelu"; raises
    ValueError otherwise. The original block named the parameter ``A_`` (typed
    ``int``) while the body read ``act_fn`` — restored here.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'''Unsupported activation function: {act_fn}''' )
| 83 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

# Type aliases used by the pipeline's `__call__` annotation below. The
# original block assigned all three values to the same obfuscated name
# `UpperCAmelCase`, leaving `Prediction`/`Predictions` undefined.
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowercase__ ( Pipeline ):
    """Object-detection pipeline.

    Predicts bounding boxes and classes for the objects in an image. When a
    tokenizer is attached (LayoutLM-style models) it instead classifies OCR
    words and rescales their normalized boxes. The original block named every
    hook method ``UpperCamelCase_`` (each def shadowing the previous) and lost
    most local assignment targets; names are restored from the references
    still present in the bodies.
    """

    def __init__( self , *args , **kwargs) -> None:
        super().__init__(*args , **kwargs)
        if self.framework == "tf":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.')
        requires_backends(self , """vision""")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))

    def _sanitize_parameters( self , **kwargs) -> Tuple[Dict, Dict, Dict]:
        """Route the optional `threshold` kwarg to the postprocess step."""
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["""threshold"""] = kwargs["""threshold"""]
        return {}, {}, postprocess_kwargs

    def __call__( self , *args , **kwargs) -> Union[Predictions, List[Prediction]]:
        """Detect objects in one image (or a batch of images)."""
        return super().__call__(*args , **kwargs)

    def preprocess( self , image) -> Dict:
        """Load the image and build model inputs, remembering the pixel size."""
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image] , return_tensors="""pt""")
        if self.tokenizer is not None:
            # LayoutLM path: tokenize the OCR words with their boxes.
            inputs = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""")
        inputs["""target_size"""] = target_size
        return inputs

    def _forward( self , model_inputs) -> Dict:
        """Run the model, carrying `target_size` (and `bbox`) through."""
        target_size = model_inputs.pop("""target_size""")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"""target_size""": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["""bbox"""] = model_inputs["""bbox"""]
        return model_outputs

    def postprocess( self , model_outputs , threshold=0.9) -> List[Dict]:
        """Turn raw model outputs into `{"score", "label", "box"}` dicts."""
        target_size = model_outputs["""target_size"""]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                # Boxes are normalized to [0, 1000]; rescale to pixel space.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]))

            scores, classes = model_outputs["""logits"""].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["""bbox"""].squeeze(0)]
            keys = ["""score""", """label""", """box"""]
            annotation = [dict(zip(keys , vals)) for vals in zip(scores.tolist() , labels , boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["""scores"""]
            labels = raw_annotation["""labels"""]
            boxes = raw_annotation["""boxes"""]
            raw_annotation["""scores"""] = scores.tolist()
            raw_annotation["""labels"""] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["""boxes"""] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["""score""", """label""", """box"""]
            annotation = [
                dict(zip(keys , vals))
                for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""])
            ]
        return annotation

    def _get_bounding_box( self , box) -> Dict[str, int]:
        """Convert a 4-element tensor box to an int `{xmin, ymin, xmax, ymax}` dict."""
        if self.framework != "pt":
            raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            """xmin""": xmin,
            """ymin""": ymin,
            """xmax""": xmax,
            """ymax""": ymax,
        }
        return bbox
| 88 |
"""simple docstring"""
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 iff both inputs are 0 (logical NOR), else 0.

    The original def declared two parameters with the same name (SyntaxError);
    the truth-table printer below already calls ``nor_gate``, grounding the
    restored name.
    """
    return int(input_1 == input_2 == 0 )


def main() -> None:
    """Print the NOR truth table (called from the ``__main__`` guard below)."""
    print('''Truth Table of NOR Gate:''' )
    print('''| Input 1 | Input 2 | Output |''' )
    print(F'''| 0 | 0 | {nor_gate(0, 0 )} |''' )
    print(F'''| 0 | 1 | {nor_gate(0, 1 )} |''' )
    print(F'''| 1 | 0 | {nor_gate(1, 0 )} |''' )
    print(F'''| 1 | 1 | {nor_gate(1, 1 )} |''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 83 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCamelCase( _a, unittest.TestCase ):
    """Tokenizer test-suite for Funnel (slow and fast tokenizers).

    The original block named all four class attributes ``lowercase_`` and all
    methods ``UpperCamelCase`` (each shadowing the previous), and lost local
    targets such as ``vocab_tokens``/``self.vocab_file``; names are restored
    from the references still present in the bodies (e.g.
    ``self.tokenizer_class`` below).
    """

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self) -> None:
        """Write a tiny WordPiece vocab file into the test's temp dir."""
        super().setUp()
        vocab_tokens = [
            '<unk>',
            '<cls>',
            '<sep>',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer loaded from the temp vocab."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast tokenizer loaded from the temp vocab."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Sample raw/normalized text pair used by the shared mixin tests."""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and id conversion against the fixed tiny vocab."""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        """Funnel marks the CLS position with token_type_id 2 (not 0)."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer('UNwant\u00E9d,running')
            sentence_len = len(inputs['input_ids']) - 1
            self.assertListEqual(inputs['token_type_ids'], [2] + [0] * sentence_len)
            inputs = tokenizer('UNwant\u00E9d,running', 'UNwant\u00E9d,running')
            self.assertListEqual(inputs['token_type_ids'], [2] + [0] * sentence_len + [1] * sentence_len)
| 89 |
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( matrix: list[list[int]] ) -> int:
    """Minimum path cost from top-left to bottom-right, moving only right/down.

    The matrix is updated IN PLACE into a cumulative-cost table; the bottom
    right entry is returned. The original block named the parameter ``A_``
    while most of the body read ``matrix`` — unified here.

    >>> snake_case_([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix ) ):
        for j in range(1, len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1] )
    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 83 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class a__ ( SequenceFeatureExtractor ):
    """CLAP feature extractor: raw mono waveforms -> (fused) log-mel features.

    The original block inherited from itself (``class a__(a__)``) and lost
    every ``self.x = ...`` target to obfuscation; attributes and method names
    are restored from the ``self.*`` reads still present in the bodies
    (``SequenceFeatureExtractor`` is imported at the top of this file).
    """

    # NOTE(review): upstream names this attribute `model_input_names` so the
    # base class can discover the model inputs — restored accordingly.
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=4_80_00,
        hop_length=4_80,
        max_length_s=10,
        fft_window_size=10_24,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 1_40_00,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ) -> None:
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of frequency bins of a real-input FFT of this window size.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        # Maximum number of raw samples processed per clip.
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # HTK-scale filters (no norm) used for the "fusion" path ...
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale='''htk''' , )
        # ... and Slaney-normalized filters used for the non-fused path.
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )

    def to_dict( self ) -> Dict[str, Any]:
        """Serialize the config, dropping the (large, derivable) filter banks."""
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features( self , waveform , mel_filters = None ) -> np.ndarray:
        """Log-mel spectrogram (dB) of `waveform`, shape (frames, n_mels)."""
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='''dB''' , )
        return log_mel_spectrogram.T

    def _random_mel_fusion( self , mel , total_frames , chunk_frames ) -> np.array:
        """Stack a shrunk global view with three random chunks (front/middle/back)."""
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        # Bilinearly shrink the full mel to the chunk size for the global view.
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion

    def _get_input_mel( self , waveform , max_length , truncation , padding ) -> np.array:
        """Truncate/pad one waveform and extract its mel features.

        Returns (features, longer) where `longer` flags clips exceeding
        `max_length` in "fusion" mode.
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                    waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer

    def __call__( self , raw_speech , truncation = None , padding = None , max_length = None , sampling_rate = None , return_tensors = None , **kwargs , ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into a BatchFeature."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , list ):
            input_mel = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
class Node ( Generic[T]):
    """One node of a singly linked list: a payload plus a `next` pointer.

    The original class was named ``__snake_case`` (clashing with the stack
    class below) while the stack's ``push`` and annotations reference
    ``Node``, and both ``self.data``/``self.next`` targets were lost.
    """

    def __init__( self , data: T ) -> None:
        self.data = data  # stored payload
        self.next: Node[T] | None = None  # next node in the chain (None = end)

    def __str__( self ) -> str:
        return f'''{self.data}'''
class __snake_case ( Generic[T]):
    """LIFO stack backed by a singly linked list of `Node` objects.

    The original block named every method ``SCREAMING_SNAKE_CASE`` (each def
    shadowing the previous) and lost assignment targets such as ``self.top``
    and ``node.next``; names are restored from the calls still present in the
    bodies (e.g. ``self.is_empty()`` inside push).
    """

    def __init__( self ) -> None:
        self.top: Node[T] | None = None  # head of the list = top of the stack

    def __iter__( self ):
        """Yield items from top to bottom."""
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__( self ) -> str:
        return "->".join([str(item ) for item in self] )

    def __len__( self ) -> int:
        return len(tuple(iter(self ) ) )

    def is_empty( self ) -> bool:
        return self.top is None

    def push( self , item: T ) -> None:
        """Put `item` on top of the stack."""
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop( self ) -> T:
        """Remove and return the top item; raise IndexError if empty."""
        if self.is_empty():
            raise IndexError('''pop from empty stack''' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek( self ) -> T:
        """Return (without removing) the top item; raise IndexError if empty."""
        if self.is_empty():
            raise IndexError('''peek from empty stack''' )
        assert self.top is not None
        return self.top.data

    def clear( self ) -> None:
        """Drop all items (the list is garbage-collected)."""
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 83 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    """Translate original SegFormer state-dict keys to transformers naming.

    Args:
        state_dict: mapping of original parameter names to tensors.
        encoder_only: when True (MiT backbone checkpoints), prefix every
            non-``head`` key with ``segformer.encoder.``.

    Returns:
        OrderedDict with renamed keys, preserving the original value order.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head'):
            key = 'segformer.encoder.' + key
        if key.startswith('backbone'):
            key = key.replace('backbone', 'segformer.encoder')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f'patch_embed{idx}', f'patch_embeddings.{int(idx)-1}')
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('segformer.encoder.layer_norm') + len('segformer.encoder.layer_norm')]
            key = key.replace(f'layer_norm{idx}', f'layer_norm.{int(idx)-1}')
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f'block{idx}', f'block.{int(idx)-1}')
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f'linear_c{idx}', f'linear_c.{int(idx)-1}')
        if key.startswith('head'):
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split each fused key/value ("kv") projection into separate key and value entries.

    The original implementation stores key and value as a single matrix; the
    first ``hidden_sizes[i]`` rows are the key projection, the rest the value
    projection. Mutates *state_dict* in place.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.weight')
            kv_bias = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.bias')
            # next, add keys and values (in that order) to the state dict
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    """Download and return the standard COCO "cats on a couch" test image."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original SegFormer/MiT checkpoint to the transformers format.

    Args:
        model_name: original checkpoint identifier (e.g. "segformer.b0.512x512.ade.160k" or "mit_b0").
        checkpoint_path: path to the original PyTorch ``.pth`` checkpoint.
        pytorch_dump_folder_path: folder that receives the converted model and image processor.
    """
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    if "segformer" in model_name:
        size = model_name[len('segformer.') : len('segformer.') + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = 'ade20k-id2label.json'
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = 'cityscapes-id2label.json'
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f'Model {model_name} not supported')
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = 'imagenet-1k-id2label.json'
        expected_shape = (1, 1000)
    else:
        raise ValueError(f'Model {model_name} not supported')

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass  # b0 uses the library defaults
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f'Size {size} not supported')

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info(f'Converting model {model_name}...')

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))['state_dict']

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] )
    elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
                [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
                [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
            ] )
    elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
                [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
                [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
            ] )
    elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
                [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
                [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
            ] )
    elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
                [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
                [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
            ] )
    elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
                [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
                [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
            ] )
    # Cityscapes checkpoints
    elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
                [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
                [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
            ] )
    elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
                [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
                [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
            ] )
    elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [
                    [-1.13_72e01, -1.27_87e01, -1.34_77e01],
                    [-1.25_36e01, -1.41_94e01, -1.44_09e01],
                    [-1.32_17e01, -1.48_88e01, -1.53_27e01],
                ],
                [
                    [-1.47_91e01, -1.71_22e01, -1.82_77e01],
                    [-1.71_63e01, -1.91_92e01, -1.95_33e01],
                    [-1.78_97e01, -1.99_91e01, -2.03_15e01],
                ],
                [
                    [7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
                    [4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
                    [3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
                ],
            ] )
    elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
            [
                [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
                [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
                [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
            ] )
    elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] )
    elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
                [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
                [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
            ] )
    elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
                [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
                [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
            ] )
    elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
                [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
                [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
            ] )
    elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
                [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
                [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
            ] )
    else:
        # No reference slice for this checkpoint: just report the top class.
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:', model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='segformer.b0.512x512.ade.160k',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

# Config classes exempted from the docstring-checkpoint check below.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    'CLIPConfigMixin',
    'DecisionTransformerConfigMixin',
    'EncoderDecoderConfigMixin',
    'RagConfigMixin',
    'SpeechEncoderDecoderConfigMixin',
    'VisionEncoderDecoderConfigMixin',
    'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints():
    """Verify every registered config class documents at least one valid checkpoint link.

    Raises:
        ValueError: listing the config classes whose docstring has no
            checkpoint whose markdown name matches its huggingface.co URL.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}')
if __name__ == "__main__":
    # Run the checkpoint-docstring audit when executed as a script.
    check_config_docstrings_have_checkpoints()
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
# Generic key/value type parameters used by the cache classes below.
T = TypeVar("T")
U = TypeVar("U")
class __SCREAMING_SNAKE_CASE ( Generic[T, U] ):
def __init__( self : List[str] , UpperCAmelCase__ : T | None , UpperCAmelCase__ : U | None ):
'''simple docstring'''
lowercase : List[str] =key
lowercase : Optional[Any] =val
lowercase : DoubleLinkedListNode[T, U] | None =None
lowercase : DoubleLinkedListNode[T, U] | None =None
def __repr__( self : Any ):
'''simple docstring'''
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head/rear nodes, used by LRUCache."""

    def __init__(self):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        # Wire the sentinels together: an empty list is head <-> rear.
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Append *node* just before the rear sentinel (most-recently-used end)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink *node* and return it; return None if it is not in a list."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """Least-recently-used cache backed by a dict plus a doubly linked list."""

    # Maps decorated functions to their dedicated LRUCache instance.
    decorator_function_to_instance_map = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self):
        return (
            f'CacheInfo(hits={self.hits}, misses={self.miss}, '
            f'capacity={self.capacity}, current size={self.num_keys})'
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the cached value for *key* (refreshing its recency), or None on a miss."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update *key*, evicting the least-recently-used entry at capacity."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Decorator factory: memoize a single-argument function with an LRU cache of *size*."""

        def cache_decorator_inner(func):
            def cache_decorator_wrapper(*args):
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info():
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, 'cache_info', cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
    # Execute the module's doctests when run directly.
    import doctest

    doctest.testmod()
| 92 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
# Module-level flag; appears unused by the tests visible in this file.
lowerCAmelCase__ = False
class __snake_case ( unittest.TestCase):
    # Intentionally empty placeholder TestCase; the nightly GPU tests follow.
    pass
@nightly
@require_torch_gpu
class __snake_case(unittest.TestCase):
    """Nightly GPU integration tests for VersatileDiffusionTextToImagePipeline.

    Restores real unittest hooks: in the mangled original all three methods
    shared one name (so unittest discovered none of them, and tearDown never
    ran) and `torch_device`/`prompt`/`generator` were replaced by the
    undefined `__lowerCAmelCase`.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy'
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

            generator = generator.manual_seed(0)
            new_image = pipe(
                prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy'
            ).images

        assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy'
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 83 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint -> config URL map; in the mangled original both bindings were
# named `__A`, so the second shadowed the logger.
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    """Configuration class for Wav2Vec2 models.

    Restores the mangled original, whose ``__init__`` declared ~60 parameters
    all named ``__UpperCAmelCase`` (a SyntaxError) and bound every value to a
    throwaway local instead of a ``self`` attribute, so ``self.conv_dim`` etc.
    were never set.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=3_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(1_0, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=1_2_8,
        num_conv_pos_embedding_groups=1_6,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=1_0,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=1_0,
        mask_feature_min_masks=0,
        num_codevectors_per_group=3_2_0,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=1_0_0,
        codevector_dim=2_5_6,
        proj_codevector_dim=2_5_6,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=2_5_6,
        tdnn_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=5_1_2,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Total downsampling factor of the convolutional feature extractor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 93 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
# NOTE(review): the three '1'/'0'/'1' bindings below were name-mangled; they
# match the ONNX Runtime TensorRT EP int8/engine-cache environment switches —
# confirm against the original benchmark script.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

# Fixed-size dummy BERT-style inputs: batch of 1, sequence length 128.
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

# Warm-up run so engine build / allocation is excluded from timing.
print('Warm up phase...')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print('Start inference...')
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
| 83 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Output of RobertaSeriesModelWithTransformation: the projected text
    embedding plus the usual encoder outputs."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    """XLM-Roberta config extended with projection/pooling options used by
    the RobertaSeries text encoder."""

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    """XLM-Roberta encoder followed by a linear projection into the diffusion
    model's text-embedding space."""

    _keys_to_ignore_on_load_unexpected = [r'pooler', r'logit_scale']
    _keys_to_ignore_on_load_missing = [r'position_ids', r'predictions.decoder.bias']
    base_model_prefix = 'roberta'
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        # `base_model_prefix` = "roberta" exposes this as self.base_model.
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, 'has_pre_transformation', False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ):
        """Encode text and return a TransformationModelOutput.

        When the config enables ``has_pre_transformation``, the projection is
        taken from the LayerNorm'ed penultimate hidden state instead of the
        last hidden state.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output2 = outputs['hidden_states'][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
| 94 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def snake_case_ ( A_ : float, A_ : float, A_ : int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = x
_lowerCamelCase : List[Any] = y
for step in range(A_ ): # noqa: B007
_lowerCamelCase : Dict = a * a - b * b + x
_lowerCamelCase : List[str] = 2 * a * b + y
_lowerCamelCase : Any = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def snake_case_(distance: float) -> tuple:
    """Black-and-white coloring: black for points inside the set (distance == 1).

    Bug fix: the body referenced ``distance`` while the parameter was named
    ``A_``; the parameter name is restored to match.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def snake_case_(distance: float) -> tuple:
    """HSV-based coloring: hue encodes the escape distance; black inside the set.

    Bug fix: the body referenced ``distance`` while the parameter was named
    ``A_``; the parameter name is restored to match.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        # Map distance to hue, full saturation/value, then scale to 0-255 RGB.
        return tuple(round(i * 2_55) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def snake_case_(
    image_width: int = 8_00,
    image_height: int = 6_00,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set to a PIL RGB image.

    Bug fix: all seven parameters previously shared the name ``A_`` (a
    duplicate-argument SyntaxError) and each computed pixel color was assigned
    to a throwaway local instead of the pixel buffer. Names restored from the
    surviving expressions; colors are now written to ``pixels[image_x, image_y]``.

    NOTE(review): ``get_distance``/``get_color_coded_rgb``/``get_black_and_white_rgb``
    are not defined under those names elsewhere in this module as written —
    they correspond to the three helpers above; confirm the intended names.
    """
    img = Image.new('''RGB''', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    # Run the module doctests, then render and display the full figure.
    import doctest

    doctest.testmod()
    # colored version, full figure
    lowerCAmelCase__ = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    # NOTE(review): the image is stored in `lowerCAmelCase__` but displayed via
    # `img`, which is undefined here — looks like a rename artifact; both
    # should refer to the same object.
    img.show()
| 83 | 0 |
"""simple docstring"""
import argparse
import datetime
def snake_case(date_input: str) -> str:
    """Return the day of the week for a ``mm-dd-yyyy`` / ``mm/dd/yyyy`` date
    using Zeller's congruence, cross-checked against ``datetime``.

    Bug fix: every local previously shared the name ``UpperCAmelCase_`` and the
    parameter was ``A__`` while the body read ``date_input``, ``m``, ``d``,
    ``y`` etc. — all names restored from the surviving expressions.

    :param date_input: 10-character date string with '-' or '/' separators
    :return: e.g. "Your date 01-01-2021, is a Friday!"
    :raises ValueError: on malformed input or out-of-range fields
    :raises AssertionError: if the formula disagrees with ``datetime`` (internal check)
    """
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # Map datetime.weekday() (Mon=0) onto Zeller's result (Sat=0 shifted).
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 85_00:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math: January/February are treated as months 13/14 of the prior year.
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"""Your date {date_input}, is a {days[str(f)]}!"""
    return response
if __name__ == "__main__":
    # Run the embedded doctests first, then serve the CLI.
    import doctest

    doctest.testmod()
    # Bug fixes: the parser was bound to a throwaway name while `parser` was
    # used below, `args` was never assigned, and the entry point called the
    # undefined name `zeller` instead of this module's `snake_case`.
    parser = argparse.ArgumentParser(
        description=(
            '''Find out what day of the week nearly any date is or was. Enter '''
            '''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
        )
    )
    parser.add_argument(
        '''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
    )
    args = parser.parse_args()
    snake_case(args.date_input)
| 95 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def snake_case_(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line with the requested padding side/length.

    Bug fix: all parameters previously shared the name ``A_`` (duplicate-argument
    SyntaxError); names restored from the surviving body references.

    :param tokenizer: a HF tokenizer instance
    :param line: raw text to encode
    :param max_length: truncation / padding length
    :param padding_side: "left" or "right"
    :param pad_to_max_length: pad every sample to ``max_length`` when True
    :param return_tensors: framework for the returned tensors
    """
    # BART tokenizers need add_prefix_space when the line doesn't start with one.
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(''' ''') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding='''max_length''' if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def snake_case_(input_ids, pad_token_id, attention_mask=None):
    """Drop token columns that consist only of padding across the whole batch.

    Bug fix: the parameters previously shared the name ``A_`` (duplicate-argument
    SyntaxError); names restored from the surviving body references.

    :param input_ids: 2-D LongTensor of token ids (batch, seq)
    :param pad_token_id: id that marks padding
    :param attention_mask: optional mask trimmed with the same column selection
    :return: trimmed ids, or a (ids, mask) tuple when a mask is given
    """
    # Keep a column iff at least one row holds a non-pad token there.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case ( _lowercase):
    """Line-by-line seq2seq dataset: reads parallel ``<type_path>.source`` /
    ``<type_path>.target`` files and tokenizes one (source, target) pair per item.

    NOTE(review): parameter/local names in this class were collapsed by an
    automated rename (`__lowerCAmelCase` / `_lowerCamelCase`), which breaks the
    code as written (duplicate argument names; values bound to throwaway locals
    while later lines read `self.src_file`, `index`, `source_line`, etc.).
    Comments below describe the evident original intent — confirm against the
    upstream file before relying on them.
    """

    def __init__( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple="train" , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Union[str, Any]="" , ):
        """simple docstring"""
        super().__init__()
        # Paths to the parallel source/target files for this split.
        _lowerCamelCase : Optional[int] = Path(__lowerCAmelCase ).joinpath(type_path + '''.source''' )
        _lowerCamelCase : List[str] = Path(__lowerCAmelCase ).joinpath(type_path + '''.target''' )
        # Character length of every source line; also used to cap the dataset.
        _lowerCamelCase : List[Any] = self.get_char_lens(self.src_file )
        _lowerCamelCase : Optional[int] = max_source_length
        _lowerCamelCase : Optional[Any] = max_target_length
        assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
        _lowerCamelCase : List[Any] = tokenizer
        _lowerCamelCase : List[Any] = prefix
        if n_obs is not None:
            # Optionally keep only the first n_obs examples.
            _lowerCamelCase : List[str] = self.src_lens[:n_obs]
        _lowerCamelCase : int = src_lang
        _lowerCamelCase : Union[str, Any] = tgt_lang

    def __len__( self : int ):
        """simple docstring"""
        return len(self.src_lens )

    def __getitem__( self : Dict , __lowerCAmelCase : Optional[Any] ):
        """simple docstring"""
        # Fetch the raw text pair for this index via linecache.
        _lowerCamelCase : str = index + 1  # linecache starts at 1
        _lowerCamelCase : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) , __lowerCAmelCase ).rstrip('''\n''' )
        _lowerCamelCase : Optional[Any] = linecache.getline(str(self.tgt_file ) , __lowerCAmelCase ).rstrip('''\n''' )
        assert source_line, f'''empty source line for index {index}'''
        assert tgt_line, f'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , __lowerCAmelCase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers split into a question encoder (source) and generator (target).
        _lowerCamelCase : Optional[int] = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        )
        _lowerCamelCase : Union[str, Any] = self.tokenizer.generator if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        _lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_source_length , '''right''' )
        _lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_target_length , '''right''' )
        _lowerCamelCase : Optional[Any] = source_inputs['''input_ids'''].squeeze()
        _lowerCamelCase : Union[str, Any] = target_inputs['''input_ids'''].squeeze()
        _lowerCamelCase : Any = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : str ):
        """simple docstring"""
        # Character count per line of the given file.
        return [len(__lowerCAmelCase ) for x in Path(__lowerCAmelCase ).open().readlines()]

    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Any ):
        """simple docstring"""
        # Collate __getitem__ dicts into batch tensors, trimming all-pad columns.
        _lowerCamelCase : List[Any] = torch.stack([x['''input_ids'''] for x in batch] )
        _lowerCamelCase : Tuple = torch.stack([x['''attention_mask'''] for x in batch] )
        _lowerCamelCase : Union[str, Any] = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        # Pad ids differ between the generator (targets) and question encoder (sources).
        _lowerCamelCase : Tuple = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        _lowerCamelCase : Tuple = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        _lowerCamelCase : Union[str, Any] = trim_batch(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase , _lowerCamelCase : List[str] = trim_batch(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
# Module-level logger shared by the helpers below.
lowerCAmelCase__ = getLogger(__name__)
def snake_case_ ( A_ : List[List] ):
    """Flatten one level of nesting: a list of lists becomes a single flat list."""
    flattened: list = []
    for sublist in A_:
        flattened.extend(sublist)
    return flattened
def snake_case_ ( A_ : str ) -> None:
    """Collect git metadata for the current repo and write it to ``<A_>/git_log.json``.

    Bug fix: the collected repo metadata was discarded and the folder path
    itself was passed to ``save_json`` as the payload.

    :param A_: destination folder path
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(A_, '''git_log.json''' ) )
def snake_case_(content, path, indent=4, **json_dump_kwargs) -> None:
    """Serialize ``content`` as JSON to ``path``.

    Bug fix: the parameters previously shared the name ``A_`` (duplicate-argument
    SyntaxError); names restored from the surviving call shape.

    :param content: JSON-serializable object
    :param path: output file path
    :param indent: indentation passed to ``json.dump``
    :param json_dump_kwargs: extra keyword arguments forwarded to ``json.dump``
    """
    with open(path, '''w''' ) as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs )
def snake_case_ ( A_ : Any ):
'''simple docstring'''
with open(A_ ) as f:
return json.load(A_ )
def snake_case_ ( ):
    """Return a dict of git metadata (repo id, HEAD sha, branch) plus hostname.

    Bug fix: this zero-argument function referenced the undefined name ``A_``
    twice; ``search_parent_directories=True`` and ``str(repo)`` restored —
    TODO confirm against the upstream helper.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def snake_case_(f: Callable, x: Iterable):
    """Eagerly map ``f`` over ``x`` and return the results as a list.

    Bug fix: both parameters previously shared the name ``A_``
    (duplicate-argument SyntaxError).
    """
    return list(map(f, x ) )
def snake_case_(obj, path):
    """Pickle ``obj`` to the file at ``path``.

    Bug fix: both parameters previously shared the name ``A_``
    (duplicate-argument SyntaxError).
    """
    with open(path, '''wb''' ) as f:
        return pickle.dump(obj, f )
def snake_case_ ( A_ : str ):
    """SQuAD-style answer normalization: lowercase, strip punctuation, drop
    articles (a/an/the), and collapse whitespace.

    Bug fix: the three inner helpers referenced an undefined ``text`` while
    their parameter was named ``A_``; each now uses its own parameter.
    """
    def remove_articles(text: str):
        return re.sub(R'''\b(a|an|the)\b''', ''' ''', text )

    def white_space_fix(text: str):
        return " ".join(text.split() )

    def remove_punc(text: str):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text: str):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(A_ ) ) ) )
def snake_case_(prediction, ground_truth):
    """Token-level F1 between a prediction and a reference (SQuAD metric).

    Bug fix: both parameters previously shared the name ``A_``
    (duplicate-argument SyntaxError); names restored from the metric's shape.

    NOTE(review): ``normalize_answer`` is not defined under that name elsewhere
    in this module as written — it corresponds to the normalization helper
    above; confirm the intended name.
    """
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    # Multiset intersection counts shared tokens with multiplicity.
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def snake_case_(prediction, ground_truth):
    """True iff the two answers are identical after normalization.

    Bug fix: both parameters previously shared the name ``A_``
    (duplicate-argument SyntaxError).
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def snake_case_(output_lns, reference_lns):
    """Average exact-match score over aligned output/reference line lists.

    Bug fix: both parameters previously shared the name ``A_``
    (duplicate-argument SyntaxError).

    :return: ``{"em": fraction_of_exact_matches}``
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns, reference_lns ):
        em += exact_match_score(hypo, pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def snake_case_(model_prefix: str) -> bool:
    """True iff the model prefix identifies a RAG model.

    Bug fix: the body referenced ``model_prefix`` while the parameter was named
    ``A_``; the parameter name is restored to match.
    """
    return model_prefix.startswith('''rag''' )
def snake_case_(extra_params, hparams, config):
    """Move the listed hyperparameters from ``hparams`` onto ``config``.

    T5-style configs name dropout ``dropout_rate``; the mapping handles that
    alias. Parameters present on ``hparams`` but unknown to ``config`` are
    logged and removed.

    Bug fix: the three parameters previously shared the name ``A_``
    (duplicate-argument SyntaxError) and the alias assignment lost its key;
    restored from the surviving expressions.

    :return: the mutated ``(hparams, config)`` pair
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams, p, None ):
            if not hasattr(config, p ) and not hasattr(config, equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams, p )
                continue
            set_p = p if hasattr(config, p ) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p ) )
            delattr(hparams, p )
    return hparams, config
| 83 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Canonical ALBERT checkpoint name -> hosted config.json URL.
__lowerCamelCase = {
    'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
    'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
    'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
    'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
    'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
    'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
    'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
    'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class __A ( SCREAMING_SNAKE_CASE_ ):
    """ALBERT model configuration.

    Bug fix: every ``__init__`` parameter previously shared the single name
    ``__snake_case`` (a duplicate-argument SyntaxError). Names restored from
    the surviving attribute assignments and default values.

    NOTE(review): the class attribute below looks like a renamed ``model_type``
    — confirm; kept as-is to avoid changing the external interface.
    """

    UpperCAmelCase__ = "albert"

    def __init__(
        self,
        vocab_size: int = 3_0_0_0_0,
        embedding_size: int = 1_2_8,
        hidden_size: int = 4_0_9_6,
        num_hidden_layers: int = 1_2,
        num_hidden_groups: int = 1,
        num_attention_heads: int = 6_4,
        intermediate_size: int = 1_6_3_8_4,
        inner_group_num: int = 1,
        hidden_act: str = "gelu_new",
        hidden_dropout_prob: float = 0,
        attention_probs_dropout_prob: float = 0,
        max_position_embeddings: int = 5_1_2,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        classifier_dropout_prob: float = 0.1,
        position_embedding_type: str = "absolute",
        pad_token_id: int = 0,
        bos_token_id: int = 2,
        eos_token_id: int = 3,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class __A ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration: declares the dynamic axes of each model input."""

    @property
    def lowerCamelCase__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
        """Return an ordered input-name -> dynamic-axes mapping."""
        # Multiple-choice inputs carry an extra "choice" dimension.
        axes = (
            {0: """batch""", 1: """choice""", 2: """sequence"""}
            if self.task == "multiple-choice"
            else {0: """batch""", 1: """sequence"""}
        )
        input_names = ("""input_ids""", """attention_mask""", """token_type_ids""")
        return OrderedDict((name, axes) for name in input_names)
| 96 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
lowerCAmelCase__ = logging.get_logger(__name__)
# Canonical CamemBERT checkpoint name -> hosted config.json URL.
# NOTE(review): this constant reuses the logger's name `lowerCAmelCase__`
# (rename artifact); the second assignment shadows the first.
lowerCAmelCase__ = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class __snake_case ( _lowercase):
    """CamemBERT model configuration.

    Bug fix: every ``__init__`` parameter previously shared the single name
    ``__lowerCAmelCase`` (a duplicate-argument SyntaxError). Names restored
    from the surviving attribute assignments and default values.
    """

    snake_case__ : Optional[Any] = "camembert"

    def __init__(
        self,
        vocab_size: int = 3_0_5_2_2,
        hidden_size: int = 7_6_8,
        num_hidden_layers: int = 1_2,
        num_attention_heads: int = 1_2,
        intermediate_size: int = 3_0_7_2,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 5_1_2,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        position_embedding_type: str = "absolute",
        use_cache: bool = True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( _lowercase):
    """ONNX export configuration: declares the dynamic axes of each model input."""

    @property
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Return an ordered input-name -> dynamic-axes mapping."""
        # Multiple-choice inputs carry an extra "choice" dimension.
        if self.task == "multiple-choice":
            axes = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            axes = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict((name, axes) for name in ('''input_ids''', '''attention_mask'''))
| 83 | 0 |
from __future__ import annotations
def a ( snake_case__: int ):
    """Return the prime factorization of ``snake_case__`` in ascending order.

    Bug fix: the trial divisor and the accumulator were both assigned to the
    single local ``lowercase_``, leaving ``i`` and ``factors`` undefined in
    the loop; names restored from the surviving expressions.

    :param snake_case__: integer to factor (returns [] for n < 2)
    """
    i = 2
    factors = []
    # Trial division up to sqrt(n); each successful division repeats the factor.
    while i * i <= snake_case__:
        if snake_case__ % i:
            i += 1
        else:
            snake_case__ //= i
            factors.append(i )
    if snake_case__ > 1:
        factors.append(snake_case__ )
    return factors
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 97 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Markers wrapped around lines that likely need manual attention after conversion.
# NOTE(review): all four constants below share the name `lowerCAmelCase__`
# (rename artifact), so the later references to HIGHLIGHT_MESSAGE_PRE /
# TO_HIGHLIGHT / TO_CONVERT cannot resolve as written.
lowerCAmelCase__ = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase__ = '''=======
>>>>>>>
'''
# TFDS identifiers whose presence flags a line for manual review.
lowerCAmelCase__ = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]
# Regex rewrites applied to every line of a TFDS script to port it to datasets.
lowerCAmelCase__ = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def snake_case_ ( args : Namespace ):
    """Factory for the ``datasets-cli convert`` subcommand.

    Bug fix: the body referenced ``args`` while the parameter was named ``A_``;
    the parameter name is restored to match.

    NOTE(review): ``ConvertCommand`` is not defined under that name elsewhere
    in this module as written — it corresponds to the command class below;
    confirm the intended name.
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory )
class __snake_case ( _lowercase):
    """``datasets-cli convert``: rewrite TensorFlow Datasets scripts into
    HuggingFace Datasets scripts via line-by-line regex substitutions.

    NOTE(review): parameter/local names in this class were collapsed by an
    automated rename, so several references below (`parser`, `train_parser`,
    `tfds_path`, `file_names`, `out_line`, `match`, `is_builder`, ...) no
    longer resolve as written. Comments describe the evident original intent.
    """

    @staticmethod
    def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : ArgumentParser ):
        """simple docstring"""
        # Register the `convert` subcommand with its two required path options.
        _lowerCamelCase : List[str] = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=__lowerCAmelCase )

    def __init__( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , *__lowerCAmelCase : int ):
        """simple docstring"""
        # Store the input path and output directory for run().
        _lowerCamelCase : List[str] = get_logger('''datasets-cli/converting''' )
        _lowerCamelCase : int = tfds_path
        _lowerCamelCase : Dict = datasets_directory

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """simple docstring"""
        # Resolve the input: a directory of scripts or a single script file.
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : Union[str, Any] = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            _lowerCamelCase : Dict = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        _lowerCamelCase : int = os.path.abspath(self._datasets_directory )
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        _lowerCamelCase : str = []
        _lowerCamelCase : Union[str, Any] = []
        _lowerCamelCase : Union[str, Any] = {}
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : List[str] = os.listdir(__lowerCAmelCase )
        else:
            _lowerCamelCase : Optional[Any] = [os.path.basename(self._tfds_path )]
        # Convert each candidate .py dataset script.
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''' )
            _lowerCamelCase : Union[str, Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Optional[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            if not os.path.isfile(__lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(__lowerCAmelCase , encoding='''utf-8''' ) as f:
                _lowerCamelCase : Tuple = f.readlines()
            _lowerCamelCase : Optional[int] = []
            _lowerCamelCase : Union[str, Any] = False
            _lowerCamelCase : int = False
            _lowerCamelCase : Tuple = []
            for line in lines:
                _lowerCamelCase : Optional[int] = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    _lowerCamelCase : Union[str, Any] = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    _lowerCamelCase : List[str] = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    _lowerCamelCase : str = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    _lowerCamelCase : Union[str, Any] = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Mark lines needing manual review and surround them with markers.
                    _lowerCamelCase : Dict = True
                    _lowerCamelCase : Optional[int] = list(filter(lambda __lowerCAmelCase : e in out_line , __lowerCAmelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCAmelCase ) + '''\n''' )
                    out_lines.append(__lowerCAmelCase )
                    out_lines.append(__lowerCAmelCase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        _lowerCamelCase : str = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        _lowerCamelCase : Dict = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , __lowerCAmelCase )
                        tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                        _lowerCamelCase : Union[str, Any] = '''from . import ''' + match.group(1 )
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f'''Error converting {out_line.strip()}''' )
                    if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                        _lowerCamelCase : Any = True
                    out_lines.append(__lowerCAmelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                _lowerCamelCase : Union[str, Any] = f_name.replace('''.py''' , '''''' )
                _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
                self._logger.info(f'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(__lowerCAmelCase )
            if needs_manual_update:
                with_manual_update.append(__lowerCAmelCase )
            with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(__lowerCAmelCase )
            self._logger.info(f'''Converted in {output_file}''' )
        # Copy shared utility files next to the datasets that import them.
        for utils_file in utils_files:
            try:
                _lowerCamelCase : Optional[int] = os.path.basename(__lowerCAmelCase )
                _lowerCamelCase : Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(__lowerCAmelCase , __lowerCAmelCase )
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 83 | 0 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def a__ ( lowercase ) -> int:  # picklable for multiprocessing
    """Sum the elements of the given array-like argument.

    Bug fix: the body referenced an undefined name ``x``; it now sums the
    parameter itself.
    """
    return lowercase.sum()
def a__ ( lowercase ) -> int:  # picklable for multiprocessing
    """Return the argument incremented by one.

    Bug fix: the body referenced an undefined name ``i``; it now increments
    the parameter itself.
    """
    return lowercase + 1
@dataclass
class __lowerCAmelCase :
    """simple docstring"""

    # NOTE(review): both annotations share the name `_snake_case`, so the second
    # (str) silently overwrites the first in __annotations__ and the generated
    # dataclass ends up with a single field. Looks like a rename artifact —
    # the original presumably declared two distinct fields (e.g. an int and a str).
    _snake_case : int
    _snake_case : str
class __lowerCAmelCase ( __magic_name__ ):
    """Unit tests for datasets.utils.py_utils helpers (map_nested, zip_dict,
    temporary_assignment).

    NOTE(review): local names in these tests were collapsed by an automated
    rename (everything is `_UpperCamelCase` / `lowerCAmelCase__`, and all three
    methods share one name), so the input/expected pairings and several later
    references (`expected_map_nested_sna_int`, `Foo`, `foo`) are broken as
    written. Comments describe the evident original intent.
    """

    def snake_case__ ( self : str ) -> Union[str, Any]:
        '''simple docstring'''
        # Inputs of every supported nested shape...
        _UpperCamelCase = {}
        _UpperCamelCase = []
        _UpperCamelCase = 1
        _UpperCamelCase = [1, 2]
        _UpperCamelCase = {'''a''': 1, '''b''': 2}
        _UpperCamelCase = {'''a''': [1, 2], '''b''': [3, 4]}
        _UpperCamelCase = {'''a''': {'''1''': 1}, '''b''': 2}
        _UpperCamelCase = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
        # ...and their expected images under the +1 mapping.
        _UpperCamelCase = {}
        _UpperCamelCase = []
        _UpperCamelCase = 2
        _UpperCamelCase = [2, 3]
        _UpperCamelCase = {'''a''': 2, '''b''': 3}
        _UpperCamelCase = {'''a''': [2, 3], '''b''': [4, 5]}
        _UpperCamelCase = {'''a''': {'''1''': 2}, '''b''': 3}
        _UpperCamelCase = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
        # Single-process map_nested over every shape.
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
        # Same checks with multiprocessing (num_proc=2).
        _UpperCamelCase = 2
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) , lowerCAmelCase__ )
        # numpy payloads: arrays are mapped only when map_numpy=True.
        _UpperCamelCase = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
        _UpperCamelCase = {'''a''': 2, '''b''': 0, '''c''': 2}
        _UpperCamelCase = {
            '''a''': np.eye(2 ).astype(lowerCAmelCase__ ),
            '''b''': np.zeros(3 ).astype(lowerCAmelCase__ ),
            '''c''': np.ones(2 ).astype(lowerCAmelCase__ ),
        }
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ , map_numpy=lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(lowerCAmelCase__ , lowerCAmelCase__ , map_numpy=lowerCAmelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(lowerCAmelCase__ , lowerCAmelCase__ , map_numpy=lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(lowerCAmelCase__ , lowerCAmelCase__ , map_numpy=lowerCAmelCase__ , num_proc=lowerCAmelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(lowerCAmelCase__ ):  # can't pickle a local lambda
            map_nested(lambda lowerCAmelCase__ : x + 1 , lowerCAmelCase__ , num_proc=lowerCAmelCase__ )

    def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        # zip_dict zips values of the same key across several dicts.
        _UpperCamelCase = {'''a''': 1, '''b''': 2}
        _UpperCamelCase = {'''a''': 3, '''b''': 4}
        _UpperCamelCase = {'''a''': 5, '''b''': 6}
        _UpperCamelCase = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ) , lowerCAmelCase__ )

    def snake_case__ ( self : Union[str, Any] ) -> List[str]:
        '''simple docstring'''
        # temporary_assignment restores the attribute when the context exits.

        class __lowerCAmelCase :
            """simple docstring"""

            _snake_case : Optional[Any] = 'bar'

        _UpperCamelCase = Foo()
        self.assertEqual(foo.my_attr , '''bar''' )
        with temporary_assignment(lowerCAmelCase__ , '''my_attr''' , '''BAR''' ):
            self.assertEqual(foo.my_attr , '''BAR''' )
        self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
    '''iterable_length, num_proc, expected_num_proc''', [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ], )
def a__ ( iterable_length, num_proc, expected_num_proc ) -> List[Any]:
    """map_nested must only spin up a multiprocessing pool when the iterable is
    long enough (parallel_min_length) and num_proc allows it; otherwise it must
    fall back to the single-process path.

    NOTE(review): the obfuscated original declared all three parameters as
    ``lowercase`` (a SyntaxError) -- names are restored from the parametrize
    string above, which pytest matches against the signature.
    """
    with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
        '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
        data_struct = {F"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            # The pool must be sized exactly as the heuristic dictates.
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __lowerCAmelCase ( __magic_name__ ):
    """Tests for temp_seed: re-seeding must reproduce framework RNG output,
    and leaving the context must restore fresh randomness.

    NOTE(review): the obfuscated original collapsed the three outputs into one
    rebound local and compared undefined names; distinct locals are restored.
    The three methods still share the name ``snake_case__`` (kept from the
    original), so only the last definition survives at runtime -- confirm the
    intended upstream names before relying on all three running.
    """

    @require_tf
    def snake_case__ ( self : Union[str, Any] ) -> int:
        """Same seed -> identical TF output; outside the context it differs."""
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        # Unseeded output must actually differ from the seeded one.
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )

    @require_torch
    def snake_case__ ( self : Dict ) -> Optional[int]:
        """Same seed -> identical torch output; outside the context it differs."""
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )

    def snake_case__ ( self : Optional[int] ) -> Dict:
        """Same seed -> identical numpy output; outside the context it differs."""
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize('''input_data''', [{}] )
def a__ ( input_data ) -> Any:
    """NestedDataStructure must expose the wrapped value unchanged via .data.

    NOTE(review): the parameter is renamed from the obfuscated ``lowercase`` to
    ``input_data`` so it matches the parametrize name pytest binds against.
    """
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    '''data, expected_output''', [
        ({}, []),
        ([], []),
        ('''foo''', ['''foo''']),
        (['''foo''', '''bar'''], ['''foo''', '''bar''']),
        ([['''foo''', '''bar''']], ['''foo''', '''bar''']),
        ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
        ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
        ({'''a''': 1, '''b''': 2}, [1, 2]),
        ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
        ({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
        ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
        ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
        ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
    ], )
def a__ ( data, expected_output ) -> str:
    """NestedDataStructure.flatten must return all leaf values as a flat list,
    regardless of how deeply lists/dicts are nested.

    NOTE(review): parameters renamed from the duplicated ``lowercase`` (a
    SyntaxError) to the names declared in the parametrize string above.
    """
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def a__ ( ) -> Optional[int]:
    """asdict must recursively convert dataclass instances -- also when nested
    inside dicts and lists -- into plain dicts, and reject a top-level value
    that is not a dataclass instance."""
    obj = A(x=1, y='''foobar''' )
    expected_output = {'''x''': 1, '''y''': '''foobar'''}
    assert asdict(obj ) == expected_output
    obj = {'''a''': {'''b''': A(x=10, y='''foo''' )}, '''c''': [A(x=20, y='''bar''' )]}
    expected_output = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
    assert asdict(obj ) == expected_output
    # A bare list is not a dataclass instance, so asdict must refuse it.
    # NOTE(review): TypeError restored for the obfuscated, undefined
    # ``lowercase`` -- confirm against datasets.utils.py_utils.asdict.
    with pytest.raises(TypeError ):
        asdict([1, A(x=10, y='''foo''' )] )
def _split_text ( text : str ) -> list:
    """Split *text* on whitespace (worker used by the iflatmap_unordered test).

    NOTE(review): renamed from the obfuscated ``a__``/``lowercase`` pair --
    the call sites below invoke ``_split_text`` with a ``text`` kwarg, so this
    is the name and parameter the rest of the file actually requires.
    """
    return text.split()
def _aseconds_generator_of_aitems_with_timing ( content ):
    """Yield (timestamp, content) twice with a 2s pause in between, so the
    caller can verify that iflatmap_unordered forwards each item as soon as it
    is produced rather than after the worker finishes.

    NOTE(review): name and ``content`` parameter restored to match the call
    site below (``kwargs_iterable=[{'content': ...}]``).
    """
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def a__ ( ) -> str:
    """iflatmap_unordered must flatten worker outputs, work with both the
    stdlib multiprocessing pool and a pathos ``multiprocess`` pool, and stream
    items as soon as they are yielded.

    NOTE(review): the obfuscated original passed the undefined ``lowercase``
    where the pool/result/item belongs; ``pool``/``out``/``content`` restored.
    """
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content )
        assert out.count('''a''' ) == 2
        assert out.count('''b''' ) == 2
        assert len(out ) == 4
| 98 |
"""simple docstring"""
def snake_case_ ( weights : list, values : list, number_of_items : int, max_weight : int, index : int ):
    '''
    Recursive 0/1 knapsack: maximise the value of items picked from
    ``index`` onwards without exceeding ``max_weight``.

    :param weights: weight of each item
    :param values: value (profit) of each item
    :param number_of_items: total number of items available
    :param max_weight: remaining weight capacity
    :param index: index of the item currently being considered
    :return: maximum achievable total value

    NOTE(review): all five parameters were declared as ``A_`` in the
    obfuscated original (a SyntaxError); names restored from body usage.

    >>> snake_case_([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> snake_case_([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    '''
    # Base case: every item has been considered.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = snake_case_(weights, values, number_of_items, max_weight, index + 1 )
    # Option 2: take the current item, if it still fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + snake_case_(
            weights, values, number_of_items, max_weight - weights[index], index + 1 )
    return max(ans1, ans2 )
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 83 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module logger (the tokenizer's save_vocabulary logs through it).
logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated original bound every constant below to the same
# name, discarding all but the last; names are restored from where the
# tokenizer class consumes them (vocab_files_names, pretrained maps, etc.).

# Name of the serialized SentencePiece model inside a checkpoint directory.
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

# Download URL of the SentencePiece vocab for each published ALBERT checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
        'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
        'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
        'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
        'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
        'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
        'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
        'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
    }
}

# Maximum sequence length supported by each checkpoint's position embeddings.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'albert-base-v1': 5_1_2,
    'albert-large-v1': 5_1_2,
    'albert-xlarge-v1': 5_1_2,
    'albert-xxlarge-v1': 5_1_2,
    'albert-base-v2': 5_1_2,
    'albert-large-v2': 5_1_2,
    'albert-xlarge-v2': 5_1_2,
    'albert-xxlarge-v2': 5_1_2,
}

# SentencePiece marks word-initial pieces with this underline character.
SPIECE_UNDERLINE = '▁'
class __UpperCAmelCase ( PreTrainedTokenizer ):
    """SentencePiece-based ALBERT tokenizer.

    NOTE(review): the obfuscated original inherited from the undefined ``__A``,
    declared duplicate ``__A`` parameters (a SyntaxError), bound all three
    class attributes to one name and gave every method the same name. Names
    are restored from how the bodies use them, from the imports at the top of
    the file, and from the standard tokenizer API the superclass calls.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        # NOTE(review): lstrip/rstrip/normalized flags reconstructed -- confirm
        # against transformers' AlbertTokenizer.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

    @property
    def vocab_size( self ):
        # The vocabulary is exactly the loaded SentencePiece model.
        return len(self.sp_model )

    def get_vocab( self ):
        """Return the token -> id map, including any added tokens."""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        # The SentencePiece processor is not picklable; drop it and reload on
        # __setstate__ from self.vocab_file.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def preprocess_text( self , inputs ):
        """Normalise raw text (whitespace, quotes, accents, case) before
        SentencePiece sees it."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            # Strip combining accent marks after NFKD decomposition.
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize( self , text ):
        """Tokenize into SentencePiece pieces, re-splitting pieces that end in
        a digit followed by a comma so the comma becomes its own piece."""
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces

    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self , tokens ):
        """Rejoin pieces into text; special tokens bypass SentencePiece."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """Single sequence: [CLS] X [SEP]; pair: [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """Return 1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """Segment ids: 0 for the first sequence (with its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """Copy (or re-serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            # No file on disk to copy: dump the in-memory model instead.
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 99 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_lowercase):
    """Import-time stand-in used when the real class's backends are missing.

    Any attempt to construct or load this object reports the missing
    backends via ``requires_backends`` instead of failing cryptically.
    """

    # Backends that must be installed before the real class can be used.
    snake_case__ : Optional[Any] = ["transformers", "torch", "note_seq"]

    def __init__( self : Union[str, Any] , *args : Optional[Any] , **kwargs : List[Any] ):
        # Constructing the dummy immediately raises the informative error.
        requires_backends(self , ["transformers", "torch", "note_seq"] )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : List[Any] , *args : str , **kwargs : Union[str, Any] ):
        # Class-level loaders (e.g. from_config) get the same treatment.
        requires_backends(cls , ["transformers", "torch", "note_seq"] )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : int , *args : List[str] , **kwargs : Optional[Any] ):
        requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 83 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict ( config , input_ids , attention_mask=None , head_mask=None ):
    """Build the kwargs dict fed to TFOPTModel in the tests below.

    If no attention mask is given, derive one by marking non-pad positions.

    NOTE(review): the obfuscated original declared four identically named
    parameters (a SyntaxError) under the name ``__snake_case``; the name and
    parameter names are restored from the call site below
    (``prepare_opt_inputs_dict(config, input_ids)``) and from body usage.
    ``tf.inta`` is obfuscation residue of a real integer dtype -- confirm.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.inta )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester :
    """Builds a tiny OPT config plus inputs and checks past_key_values caching.

    NOTE(review): the obfuscated original named this class ``__snake_case``
    and declared every ``__init__`` parameter as ``A_`` (a SyntaxError).
    The class name, parameter names and method names are restored from the
    call sites below (``TFOPTModelTester(self)``,
    ``prepare_config_and_inputs_for_common``,
    ``check_decoder_model_past_large_inputs``) and from the assignment order
    inside ``__init__``.
    """

    config_cls = OPTConfig        # read as self.config_cls below
    config_updates = {}           # extra kwargs merged into the config
    hidden_act = """gelu"""

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , embed_dim=16 , word_embed_proj_dim=16 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common( self ):
        """Return (config, inputs_dict) for a small decoder-only OPT; every
        row of input_ids is terminated with the eos token."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=self.is_encoder_decoder , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config , input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """A cached (past_key_values) forward must match the uncached one."""
        model = TFOPTModel(config=config )
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Model-level tests for TF OPT (config, past-key-values, embedding resize).

    NOTE(review): obfuscation artifacts left intact below -- every class
    attribute shares the name ``lowerCamelCase__`` (only the last assignment
    survives), every method is named ``lowercase_`` (later defs shadow earlier
    ones), ``_get_word_embedding_weight`` declares two parameters with the same
    name ``A_`` (a SyntaxError), and several call arguments reference the
    undefined ``A_``. Restore names from the upstream transformers test
    before relying on this class.
    """
    lowerCamelCase__ : Dict = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    lowerCamelCase__ : List[str] = (TFOPTForCausalLM,) if is_tf_available() else ()
    lowerCamelCase__ : Optional[int] = (
        {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
    )
    lowerCamelCase__ : int = False
    lowerCamelCase__ : str = False
    lowerCamelCase__ : str = False
    lowerCamelCase__ : Tuple = 1_0
    def lowercase_ ( self ):
        """Presumably setUp: build the shared model tester and config tester."""
        SCREAMING_SNAKE_CASE__ = TFOPTModelTester(self )
        SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=A_ )
    def lowercase_ ( self ):
        """Run the generic config sanity checks."""
        self.config_tester.run_common_tests()
    def lowercase_ ( self ):
        """Delegate the past-key-values consistency check to the tester."""
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*A_ )
    def lowercase_ ( self ):
        """Resizing token embeddings must keep overlapping weights unchanged."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(A_ , A_ ):
            # NOTE(review): duplicate parameter names -- upstream this is
            # (model, embedding_layer).
            if hasattr(A_ , '''weight''' ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(A_ , '''weight''' ):
                    return embedding_layer.weight
                else:
                    return None
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                SCREAMING_SNAKE_CASE__ = model_class(config=A_ )
                SCREAMING_SNAKE_CASE__ = _get_word_embedding_weight(A_ , model.get_input_embeddings() )
                SCREAMING_SNAKE_CASE__ = _get_word_embedding_weight(A_ , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(A_ )
                SCREAMING_SNAKE_CASE__ = _get_word_embedding_weight(A_ , model.get_input_embeddings() )
                SCREAMING_SNAKE_CASE__ = _get_word_embedding_weight(A_ , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                SCREAMING_SNAKE_CASE__ = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , A_ )
                # check that weights remain the same after resizing
                SCREAMING_SNAKE_CASE__ = True
                for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    # NOTE(review): duplicate loop targets ``pa, pa`` -- only the
                    # second binding survives; upstream these are (p1, p2).
                    if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                        SCREAMING_SNAKE_CASE__ = False
                self.assertTrue(A_ )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , A_ )
                    SCREAMING_SNAKE_CASE__ = True
                    for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                            SCREAMING_SNAKE_CASE__ = False
                    self.assertTrue(A_ )
def _long_tensor ( tok_lst ):
    """Wrap a (nested) list of token ids in a TF constant tensor.

    NOTE(review): renamed from the obfuscated ``__snake_case`` to match the
    call site in the integration test below (``_long_tensor([[...]])``).
    ``tf.intaa`` is obfuscation residue of an integer dtype (likely
    tf.int32/tf.int64) -- confirm against the upstream OPT tests.
    """
    return tf.constant(tok_lst , dtype=tf.intaa )
@require_tf
class __snake_case ( unittest.TestCase ):
    """Builds a tiny OPT config and eos-terminated input ids for head tests.

    NOTE(review): the lone class attribute was presumably ``vocab_size``
    upstream; as written the ``self.vocab_size`` reads below would fail.
    Locals are also collapsed into one rebound name, so the final
    ``return config, input_ids, batch_size`` references names never bound.
    """
    lowerCamelCase__ : Optional[Any] = 9_9
    def lowercase_ ( self ):
        """Return (config, input_ids, batch_size) for a 4x7 batch ending in EOS."""
        # Column of eos tokens (id 2) appended so every row ends with EOS.
        SCREAMING_SNAKE_CASE__ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
        SCREAMING_SNAKE_CASE__ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        SCREAMING_SNAKE_CASE__ = input_ids.shape[0]
        SCREAMING_SNAKE_CASE__ = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __snake_case ( unittest.TestCase ):
    """Slow integration test: facebook/opt-350m hidden states must match a
    reference slice, both eagerly and under XLA compilation.

    NOTE(review): locals are collapsed into ``SCREAMING_SNAKE_CASE__`` and
    several call arguments reference the undefined ``A_``; restore from the
    upstream transformers test before running.
    """
    @slow
    def lowercase_ ( self ):
        """Compare last_hidden_state[: , :3, :3] against hard-coded values."""
        SCREAMING_SNAKE_CASE__ = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
        SCREAMING_SNAKE_CASE__ = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        # Attention mask derived from pad positions.
        SCREAMING_SNAKE_CASE__ = tf.not_equal(A_ , model.config.pad_token_id )
        with tf.GradientTape():
            SCREAMING_SNAKE_CASE__ = model(input_ids=A_ , attention_mask=A_ ).last_hidden_state
        SCREAMING_SNAKE_CASE__ = (1, 11, 5_12)
        self.assertEqual(output.shape , A_ )
        SCREAMING_SNAKE_CASE__ = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , A_ , atol=4E-3 ) )
        # Repeat under XLA with a looser tolerance.
        SCREAMING_SNAKE_CASE__ = tf.function(A_ , jit_compile=A_ )
        SCREAMING_SNAKE_CASE__ = xla_generate(A_ , A_ )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , A_ , atol=4E-2 ) )
@require_tf
@slow
class __snake_case ( unittest.TestCase ):
    """Slow integration test: mean logits of facebook/opt-350m on fixed
    prompts must match hard-coded reference values, eagerly and under XLA.

    NOTE(review): locals collapsed into ``SCREAMING_SNAKE_CASE__`` and call
    arguments replaced by the undefined ``A_`` -- restore from the upstream
    test before running.
    """
    def lowercase_ ( self ):
        """Presumably setUp: record the checkpoint path."""
        super().setUp()
        SCREAMING_SNAKE_CASE__ = '''facebook/opt-350m'''
    def lowercase_ ( self ):
        """Check per-prompt mean logits against Metaseq reference values."""
        SCREAMING_SNAKE_CASE__ = TFOPTForCausalLM.from_pretrained(self.path_model )
        SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained(self.path_model )
        SCREAMING_SNAKE_CASE__ = [
            '''Today is a beautiful day and I want to''',
            '''In the city of''',
            '''Paris is the capital of France and''',
            '''Computers and mobile phones have taken''',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        SCREAMING_SNAKE_CASE__ = tokenizer(A_ , return_tensors='''tf''' , padding=A_ , add_special_tokens=A_ )
        SCREAMING_SNAKE_CASE__ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        SCREAMING_SNAKE_CASE__ = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ] )
        self.assertTrue(np.allclose(A_ , A_ , atol=1E-4 ) )
        # Same check under XLA compilation.
        SCREAMING_SNAKE_CASE__ = tf.function(A_ , jit_compile=A_ )
        SCREAMING_SNAKE_CASE__ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(A_ , A_ , atol=1E-4 ) )
@require_tf
@slow
class __snake_case ( unittest.TestCase ):
    """Slow generation tests for OPT: greedy completions of fixed prompts and
    batched generation with left padding.

    NOTE(review): locals collapsed into ``SCREAMING_SNAKE_CASE__`` and call
    arguments replaced by the undefined ``A_`` throughout -- restore from the
    upstream transformers test before running.
    """
    @property
    def lowercase_ ( self ):
        """Fixed prompts shared by the generation tests (read as self.prompts)."""
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
    def lowercase_ ( self ):
        """facebook/opt-125m greedy completions must match the expected strings."""
        SCREAMING_SNAKE_CASE__ = '''facebook/opt-125m'''
        SCREAMING_SNAKE_CASE__ = [
            '''Today is a beautiful day and I want to''',
            '''In the city of New York, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        SCREAMING_SNAKE_CASE__ = []
        SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained(A_ )
        SCREAMING_SNAKE_CASE__ = TFOPTForCausalLM.from_pretrained(A_ )
        for prompt in self.prompts:
            SCREAMING_SNAKE_CASE__ = tokenizer(A_ , return_tensors='''tf''' ).input_ids
            SCREAMING_SNAKE_CASE__ = model.generate(A_ , max_length=10 )
            SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
            predicted_outputs += generated_string
        self.assertListEqual(A_ , A_ )
    def lowercase_ ( self ):
        """Batched generation with left padding must equal per-sentence generation."""
        SCREAMING_SNAKE_CASE__ = '''facebook/opt-350m'''
        SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained(A_ )
        SCREAMING_SNAKE_CASE__ = TFOPTForCausalLM.from_pretrained(A_ )
        SCREAMING_SNAKE_CASE__ = '''left'''
        # use different length sentences to test batching
        SCREAMING_SNAKE_CASE__ = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        SCREAMING_SNAKE_CASE__ = tokenizer(A_ , return_tensors='''tf''' , padding=A_ )
        SCREAMING_SNAKE_CASE__ = inputs['''input_ids''']
        SCREAMING_SNAKE_CASE__ = model.generate(input_ids=A_ , attention_mask=inputs['''attention_mask'''] )
        SCREAMING_SNAKE_CASE__ = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
        SCREAMING_SNAKE_CASE__ = model.generate(input_ids=A_ )
        # Count the pad positions of the shorter sentence to equalize lengths.
        SCREAMING_SNAKE_CASE__ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
        SCREAMING_SNAKE_CASE__ = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
        SCREAMING_SNAKE_CASE__ = model.generate(input_ids=A_ , max_length=model.config.max_length - num_paddings )
        SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
        SCREAMING_SNAKE_CASE__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_ )
        SCREAMING_SNAKE_CASE__ = tokenizer.decode(output_padded[0] , skip_special_tokens=A_ )
        SCREAMING_SNAKE_CASE__ = [
            '''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
            '''Today, I was in the middle of a conversation with a friend about the''',
        ]
        self.assertListEqual(A_ , A_ )
        self.assertListEqual(A_ , [non_padded_sentence, padded_sentence] )
    def lowercase_ ( self ):
        """facebook/opt-350m greedy completions must match the expected strings."""
        SCREAMING_SNAKE_CASE__ = '''facebook/opt-350m'''
        SCREAMING_SNAKE_CASE__ = [
            '''Today is a beautiful day and I want to''',
            '''In the city of San Francisco, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        SCREAMING_SNAKE_CASE__ = []
        SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained(A_ )
        SCREAMING_SNAKE_CASE__ = TFOPTForCausalLM.from_pretrained(A_ )
        for prompt in self.prompts:
            SCREAMING_SNAKE_CASE__ = tokenizer(A_ , return_tensors='''tf''' ).input_ids
            SCREAMING_SNAKE_CASE__ = model.generate(A_ , max_length=10 )
            SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
            predicted_outputs += generated_string
        self.assertListEqual(A_ , A_ )
| 100 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase):
    """Integration check: Flax MT5-small must reproduce a reference NLL score.

    NOTE(review): locals are collapsed into ``_lowerCamelCase`` and several
    call arguments reference the undefined ``__lowerCAmelCase`` -- restore
    from the upstream transformers test before running.
    """
    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Score 'Hi I am' given 'Hello there' and compare against -84.9127."""
        _lowerCamelCase : Any = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        _lowerCamelCase : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        _lowerCamelCase : Union[str, Any] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        _lowerCamelCase : Optional[int] = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        # Teacher forcing: decoder inputs are the labels shifted right.
        _lowerCamelCase : List[Any] = shift_tokens_right(__lowerCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
        _lowerCamelCase : int = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
        _lowerCamelCase : Optional[Any] = optax.softmax_cross_entropy(__lowerCAmelCase , onehot(__lowerCAmelCase , logits.shape[-1] ) ).mean()
        _lowerCamelCase : Dict = -(labels.shape[-1] * loss.item())
        _lowerCamelCase : Dict = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 83 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase (__SCREAMING_SNAKE_CASE ):
    """Bundles a ViT image processor and a CLIP tokenizer into one CLIPSeg
    processor: tokenizes text prompts, preprocesses images and visual
    prompts, and merges the results into a single encoding.

    NOTE(review): the obfuscated original bound all three class attributes to
    one name, declared duplicate parameters (a SyntaxError), gave every
    method the same name, and -- in the text+images branch -- assigned the
    computed pixel values to a throwaway local instead of attaching them to
    the returned encoding. All of these are restored/fixed below.
    """
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """ViTImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Accept the two sub-processors; `feature_extractor` is kept as a
        deprecated alias for `image_processor`."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , visual_prompt=None , return_tensors=None , **kwargs ):
        """Encode text and/or images and/or a visual prompt into one encoding.

        Exactly one of text/visual_prompt may be given; images may accompany
        either, or be passed alone.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError('You have to specify either text, visual prompt or images.' )
        if text is not None and visual_prompt is not None:
            raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                'pixel_values': image_features.pixel_values,
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            # BUG FIX: the obfuscated copy discarded the pixel values instead
            # of attaching them to the tokenizer encoding.
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def feature_extractor_class( self ):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
| 101 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def snake_case_(files, tmp_path_factory):
    """DatasetInfosDict.from_directory must read size info from README.md and/or dataset_infos.json."""
    # Restored distinct parameter names (the obfuscated `A_, A_` signature is a SyntaxError)
    # and the local names the body actually references.
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def snake_case_(tmp_path, dataset_info):
    """DatasetInfo must round-trip through write_to_directory/from_directory."""
    # Restored distinct parameter names and the clobbered locals.
    tmp_path_str = str(tmp_path)
    dataset_info.write_to_directory(tmp_path_str)
    reloaded = DatasetInfo.from_directory(tmp_path_str)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path_str, "dataset_info.json"))
def snake_case_():
    """A fully-populated DatasetInfo must produce a YAML-safe dict that round-trips through yaml."""
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=13_37,
        post_processing_size=4_42,
        dataset_size=12_34,
        size_in_bytes=13_37 + 4_42 + 12_34,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # only YAML-representable primitives/containers may appear
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def snake_case_():
    """An empty DatasetInfo must serialize to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=13_37),
            }
        ),
    ],
)
def snake_case_(tmp_path, dataset_infos_dict):
    """DatasetInfosDict must round-trip through write_to_directory/from_directory."""
    tmp_path_str = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path_str)
    reloaded = DatasetInfosDict.from_directory(tmp_path_str)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path_str, "README.md"))
| 83 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__magic_name__ : str = logging.get_logger(__name__)
def UpperCamelCase(videos):
    """
    Coerce the input into a batch of videos (a list of lists of frames).

    Accepts a single frame, a single video (list of frames) or an already-batched
    list of videos; raises ValueError otherwise.
    """
    # The obfuscated version named the parameter differently from the body,
    # which raised NameError on `videos`; one consistent name fixes it.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""")


# Backward-compatible alias: the image-processor class in this file calls `make_batched`.
make_batched = UpperCamelCase
class lowercase__ ( BaseImageProcessor ):
    """
    Video image processor: resizes, center-crops, rescales and normalizes each
    frame and returns the frames batched under ``pixel_values``.

    NOTE(review): the original block had all methods named ``_a`` and every
    parameter named ``_A`` (duplicate-argument SyntaxError); the conventional
    image-processor method names and signatures are restored here so the
    internal ``self.resize``/``self.center_crop``/... calls resolve.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_5_5,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 2_2_4}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize one frame to ``size`` (either a shortest-edge or an explicit height/width dict)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop one frame to an explicit height/width."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize one frame with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ):
        """Apply the configured pipeline to a single frame."""
        # Fixed operator precedence: the original `do_resize and size is None or resample is None`
        # rejected *every* call with resample=None even when do_resize was False.
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one or more videos; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"""pixel_values""": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 102 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __snake_case :
    """Builds tiny AST configs and dummy inputs for the unit tests below.

    NOTE(review): the obfuscated original assigned every constructor argument to a
    local instead of ``self`` and gave all methods the same name; the standard
    ModelTester shape is restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        patch_size=2,
        max_length=2_4,
        num_mel_bins=1_6,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Create random spectrogram inputs and (optionally) labels plus a config."""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        """Run a forward pass and check the hidden-state shape."""
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"""input_values""": input_values}
        return config, inputs_dict
@require_torch
class __snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """Common model tests for the Audio Spectrogram Transformer.

    NOTE(review): the obfuscated bases (`_lowercase`) and identical method names
    made the class undefinable/uncollectable; the standard mixin bases (both
    imported at the top of this file) and `test_*` names are restored.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # The audio-classification pipeline has its own dedicated tests.
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        # NOTE(review): `ASTModelTester` is the conventional name for the helper
        # class defined above — confirm it matches the tester's actual name.
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def snake_case_():
    """Download a sample flac file from the Hub and load it with torchaudio.

    Returns:
        (waveform, sampling_rate) as produced by ``torchaudio.load``.
    """
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


# Backward-compatible alias: the integration test in this file calls `prepare_audio()`.
prepare_audio = snake_case_
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase):
    """Slow integration test: run the pretrained AST audio classifier on a real clip."""

    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='''pt''' ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 5_2_7))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 83 | 0 |
"""simple docstring"""
def snake_case(nfactor: float, moles: float, volume: float) -> float:
    """Convert molarity (moles/volume) to normality using the n-factor.

    The original obfuscated signature repeated one parameter name three times
    (a SyntaxError); the names used by the body are restored.
    """
    return round(float(moles / volume) * nfactor)
def snake_case(volume: float, moles: float, temperature: float) -> float:
    """Ideal-gas pressure (rounded): P = nRT / V with R = 0.0821 L·atm/(mol·K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))
def snake_case(pressure: float, moles: float, temperature: float) -> float:
    """Ideal-gas volume (rounded): V = nRT / P with R = 0.0821 L·atm/(mol·K)."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def snake_case(pressure: float, moles: float, volume: float) -> float:
    """Ideal-gas temperature (rounded): T = PV / (nR) with R = 0.0821 L·atm/(mol·K)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def snake_case_(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 1_00,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
):
    """Simulated annealing over a SearchProblem-like state.

    ``search_prob`` must expose ``score()``, ``get_neighbors()`` and ``x``/``y``
    coordinates (as produced by the hill-climbing SearchProblem). Returns the
    best state visited. The obfuscated original collapsed all locals into one
    name; the obvious names are restored below.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('''Iterations''')
        plt.ylabel('''Function values''')
        plt.show()
    return best_state
if __name__ == "__main__":
    # Demo: the nested defs below had duplicate parameter names (SyntaxError) and
    # the script referenced undefined names; both are restored here.

    def test_f1(x, y):
        """Convex bowl: f(x, y) = x^2 + y^2."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = snake_case_(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = snake_case_(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    def test_f2(x, y):
        """Saddle-ish test function: f(x, y) = 3x^2 - 6y."""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = snake_case_(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        f"""{local_min.score()}"""
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = snake_case_(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        f"""{local_min.score()}"""
    )
| 83 | 0 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def _lowerCamelCase(sequences, padding_value, padding_side, sequence_length):
    """Pad each sequence to ``sequence_length`` with ``padding_value``.

    When ``padding_value`` is a tuple (e.g. span endpoints), a trailing axis of
    size 2 is used. Returns nested Python lists.

    NOTE(review): the obfuscated original lost the slice-assignment targets; the
    right-pad branch writes the data at the front, the left-pad branch at the
    back of each row.
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        data = tensor[:sequence_length]  # truncate anything longer than the target length
        if padding_side == "right":
            out_tensor[i, : len(data)] = data
        else:
            out_tensor[i, sequence_length - len(data):] = data
    return out_tensor.tolist()


# Backward-compatible alias: the data collator below calls `padding_tensor`.
padding_tensor = _lowerCamelCase
def _lowerCamelCase(char) -> bool:
    """Return True if ``char`` is a punctuation character.

    Treats the four ASCII symbol ranges as punctuation, plus anything Unicode
    classifies in a "P*" category.
    """
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


# Public alias so callers don't depend on the mangled name.
is_punctuation = _lowerCamelCase
@dataclass
class UpperCamelCase__ ( DataCollatorMixin ):
    """Data collator for LUKE-style token classification.

    Pads token features with the tokenizer, pads labels/ner_tags/entity spans to
    the entity sequence length, and converts everything to int64 torch tensors.

    NOTE(review): the obfuscated original declared every field as ``A__`` (only
    the last survived) and used an undefined base class; the field names the
    method body reads (``self.tokenizer`` etc.) and the ``DataCollatorMixin``
    base imported at the top of this file are restored.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -1_0_0
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors is deferred until after label padding
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        # `torch.intaa` in the obfuscated source does not exist; int64 is the dtype expected for ids/labels
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 104 |
"""simple docstring"""
from collections import namedtuple
# Factor pair for one unit: `from_` converts to cubic metres, `to` converts from them.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def snake_case_(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume ``value`` from ``from_type`` to ``to_type``.

    Raises:
        ValueError: if either unit name is not in METRIC_CONVERSION.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 83 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCamelCase__ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# Restore the identifier the rest of this file reads (`git_repo_path` was
# clobbered by an automated rename); keep the old name for compatibility.
git_repo_path = UpperCamelCase__
sys.path.append(os.path.join(git_repo_path, '''utils'''))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCamelCase__ : Dict = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
# The test class below references this string as `REFERENCE_CODE`.
REFERENCE_CODE = UpperCamelCase__
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir ,'models/bert/' ) )
SCREAMING_SNAKE_CASE_ : Any = self.transformer_dir
shutil.copy(
os.path.join(snake_case__ ,'src/transformers/models/bert/modeling_bert.py' ) ,os.path.join(self.transformer_dir ,'models/bert/modeling_bert.py' ) ,)
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = 'src/transformers'
shutil.rmtree(self.transformer_dir )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ):
SCREAMING_SNAKE_CASE_ : Dict = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
SCREAMING_SNAKE_CASE_ : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 )
SCREAMING_SNAKE_CASE_ : Optional[int] = black.format_str(snake_case__ ,mode=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(self.transformer_dir ,'new_code.py' )
with open(snake_case__ ,'w' ,newline='\n' ) as f:
f.write(snake_case__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=snake_case__ )
with open(snake_case__ ,'r' ) as f:
self.assertTrue(f.read() ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' ,'BertLMPredictionHead' ,REFERENCE_CODE + '\n' ,)
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' ,'BertLMPredictionHead' ,snake_case__ ,)
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' ,'TestModelLMPredictionHead' ,re.sub('Bert' ,'TestModel' ,snake_case__ ) ,)
# Copy consistency with a really long name
SCREAMING_SNAKE_CASE_ : str = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' ,F'{long_class_name}LMPredictionHead' ,re.sub('Bert' ,snake_case__ ,snake_case__ ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' ,'TestModelLMPredictionHead' ,snake_case__ ,overwrite_result=re.sub('Bert' ,'TestModel' ,snake_case__ ) ,)
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = check_copies.LOCALIZED_READMES['README_zh-hans.md']
SCREAMING_SNAKE_CASE_ : List[Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
SCREAMING_SNAKE_CASE_ : Optional[int] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
SCREAMING_SNAKE_CASE_ : List[Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = check_copies.convert_to_localized_md(
snake_case__ ,snake_case__ ,localized_readme['format_model_list'] )
self.assertFalse(snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = check_copies.convert_to_localized_md(
snake_case__ ,snake_case__ ,localized_readme['format_model_list'] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
SCREAMING_SNAKE_CASE_ : str = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = check_copies.convert_to_localized_md(
snake_case__ ,snake_case__ ,localized_readme['format_model_list'] )
# Check if the model link is synchronized.
self.assertEqual(snake_case__ ,snake_case__ )
| 105 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    """Builds a tiny DebertaV2 config plus random inputs and runs shape checks for every task head.

    The garbled original assigned constructor arguments to throwaway locals instead of
    ``self.*`` and gave every method the same name; this rewrite restores the attribute
    assignments and the method names that the test class below calls.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random ids/masks/labels for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        # The loss must be a scalar tensor.
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Run with progressively fewer optional inputs; all should succeed.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Tile every input along a new "choice" dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model-tester harness for tiny DebertaV2 models."""

    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): the flag names below follow the upstream DebertaV2 test file
    # (the mangled original lost them) — confirm against the mixin's expectations.
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the real `microsoft/deberta-v2-xlarge` checkpoint."""

    @unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge')
        input_ids = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4), f'''{output[:, 1:4, 1:4]}''')
| 83 | 0 |
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution(words_file_path: str = "") -> int:
    """Count the "triangle words" in *words_file_path* (Project Euler 42).

    A triangle word is one whose letter-value sum (A=1, B=2, ...) is a
    triangular number.  When no path is given, the ``words.txt`` file next
    to this script is used.

    :param words_file_path: optional path to a one-line file of
        comma-separated, double-quoted uppercase words.
    :return: the number of triangle words found.
    """
    if not words_file_path:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        words_file_path = os.path.join(script_dir, "words.txt")

    with open(words_file_path) as f:
        words = f.readline()

    # Single line of comma-separated, double-quoted words.
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]

    # Set membership keeps the per-word lookup O(1).
    triangular_numbers = set(TRIANGULAR_NUMBERS)
    triangle_words = [
        score
        for score in [sum(ord(letter) - 64 for letter in word) for word in words]
        if score in triangular_numbers
    ]
    return len(triangle_words)


if __name__ == "__main__":
    print(solution())
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# The garbled original bound all three constants to one name, clobbering the
# first two; they are distinct values used throughout the test class below.
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    """End-to-end tests for `cached_file` / `get_file_from_repo` / `has_file` (hit the Hub)."""

    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        # The miss is recorded under ".no_exist" so the next lookup is local.
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
        # This check we did call the fake head request
        mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 83 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    """Segments the region described by *text* with CLIPSeg, then inpaints it with Stable Diffusion.

    The garbled original gave every ``__init__``/``__call__`` parameter the same
    name (a SyntaxError); this rewrite restores the parameter names that the
    method bodies and ``register_modules`` call actually use.
    """

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        # Patch legacy scheduler configs in place (with a deprecation warning).
        if hasattr(scheduler.config, 'steps_offset' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
                ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
                ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
                ' file'
            )
            deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['steps_offset'] = 1
            scheduler._internal_dict = FrozenDict(new_config )

        if hasattr(scheduler.config, 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f'The configuration file of this scheduler: {scheduler} has not set the configuration'
                ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
                ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
                ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
                ' Hub, it would be very nice if you could open a Pull request for the'
                ' `scheduler/scheduler_config.json` file'
            )
            deprecate('skip_prk_steps not set', '1.0.0', deprecation_message, standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['skip_prk_steps'] = True
            scheduler._internal_dict = FrozenDict(new_config )

        if safety_checker is None:
            logger.warning(
                f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Compute attention in *slice_size* chunks to trade speed for memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore full (un-sliced) attention computation."""
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        """Offload submodules to CPU via accelerate, moving each to GPU only when used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )

        device = torch.device('cuda' )

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device('meta' ) or not hasattr(self.unet, '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook' )
                and hasattr(module._hf_hook, 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 5_12,
        width: int = 5_12,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Segment the region matching *text* in *image* and inpaint it with *prompt*."""
        # CLIPSeg produces a per-pixel logit map for the text query.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding='max_length', return_tensors='pt' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 107 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __snake_case(PretrainedConfig):
    """Configuration class for CvT (Convolutional vision Transformer) models.

    Each list-valued argument has one entry per stage (CvT-13 has 3 stages).
    The original block named every __init__ parameter identically (a
    SyntaxError); names are restored from the attribute assignments, which
    match the upstream CvtConfig signature.
    """

    # Required by the Hugging Face config machinery for model auto-mapping.
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        """All config values are stored verbatim as attributes; extra kwargs go
        to PretrainedConfig (e.g. id2label, output_hidden_states)."""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 83 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    """Config tests for SegFormer: verifies the SegFormer-specific attributes
    exist on an instantiated config.

    Renamed from the mangled placeholder: the test class below instantiates
    ``SegformerConfigTester`` by this name, and ``ConfigTester`` (imported
    above) invokes ``create_and_test_config_common_properties`` from
    ``run_common_tests``.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        # SegFormer configs expose per-stage lists rather than scalar sizes.
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    """Builds tiny SegFormer configs/inputs and runs shape/loss checks.

    Restored from a mangled version whose ``__init__`` declared every
    parameter with the same name (SyntaxError) and whose method bodies read
    names (``model``, ``result``, …) that were never assigned. Method names
    match what ``SegformerModelTest`` below calls on ``self.model_tester``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tiny tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # Semantic-segmentation labels: one class id per pixel.
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last hidden state shape."""
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # The final stage downsamples by downsampling_rates[-1] * 2.
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width),
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        """Check segmentation logits shape and that a positive loss is produced."""
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        """Single-label (binary) segmentation must still yield a positive loss."""
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common modeling tests for SegFormer.

    Restored from a mangled version in which every test method shared one name
    (so unittest would only discover the last one), ``setUp`` never assigned
    ``self.model_tester``/``self.config_tester``, and the mixin-required class
    attributes (``all_model_classes`` etc.) were collapsed to a placeholder.
    """

    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # One attention map per transformer layer, summed over all blocks.
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # Plain base models produce no loss, so skip them.
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests.

    Named ``prepare_img`` because the integration test class below calls it by
    that name; the mangled original assigned the image to a throwaway variable
    and returned an undefined name.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against released SegFormer checkpoints.

    Restored real local names (the mangled version read undefined names) and
    removed a stray dataset artifact that had been fused onto the final line.
    """

    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        # Looser tolerance: this checkpoint is fp16-trained upstream.
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        # With target_sizes, maps are resized to the requested resolution.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        # Without target_sizes, maps stay at the model's output resolution.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output of ``step_pred``.

    Named ``SdeVeOutput`` with fields ``prev_sample``/``prev_sample_mean``
    because the scheduler below constructs it exactly that way; the mangled
    original declared both fields under one name (the second annotation
    silently replaced the first) and inherited an undefined placeholder
    instead of the imported ``BaseOutput``.
    """

    # Sample after adding the stochastic diffusion term (next model input).
    prev_sample: torch.FloatTensor
    # Mean of prev_sample (no noise added); useful as the final denoised output.
    prev_sample_mean: torch.FloatTensor
class __snake_case(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler (Song et al., score_sde).

    Restored from a mangled version with duplicate ``__init__`` parameter
    names (SyntaxError), undefined local reads, and identically named methods.
    Method names follow the diffusers scheduler protocol (``step_pred`` /
    ``step_correct`` are what the SDE-VE pipeline invokes).
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2_0_0_0,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 13_48.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # Standard deviation of the initial noise distribution.
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep=None) -> torch.FloatTensor:
        """No input scaling is needed for this scheduler; returns sample unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        """Set the continuous timesteps, linearly spaced from 1 down to sampling_eps."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Set the geometric noise schedule used by the VE SDE."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        # Discrete log-spaced sigmas used to index per-timestep noise levels.
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete step (zero at the first step)."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ):
        """Predictor step: reverse-SDE ancestral update from the score estimate."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ):
        """Corrector step: Langevin MCMC refinement with SNR-scaled step size."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Forward-diffuse clean samples to the noise level of *timesteps*."""
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 83 | 0 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if *phone* is a valid Sri Lankan mobile phone number.

    Accepted prefixes: 0, 94, +94 or 0094, followed by 7X (X in 0,1,2,4-8),
    an optional separator ('-' or ' '), and seven digits.

    Bug fixed: the original compiled the pattern but then called
    ``re.search(phone, phone)`` — matching the number against itself, so the
    compiled pattern was never used.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 109 |
"""simple docstring"""
from torch import nn
def snake_case_(act_fn: str):
    """Return the torch activation module for the given activation name.

    Supported names: "swish"/"silu" (SiLU), "mish", "gelu".

    Bug fixed: the parameter was named ``A_`` while the body read the
    undefined name ``act_fn``, so every call raised NameError; the parameter
    is restored to the name the body uses.

    Raises:
        ValueError: if *act_fn* is not a supported activation name.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
| 83 | 0 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
# Target hub model name for the generated tiny checkpoint.
mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    # Tokenizer must be built while the temp files still exist.
    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=10_00,
    tgt_vocab_size=10_00,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 110 |
"""simple docstring"""
def nor_gate(input_1: int, input_2: int) -> int:
    """Return the NOR of two binary inputs: 1 only when both inputs are 0.

    Restored from a mangled version that declared both parameters with the
    same name (SyntaxError) and read undefined locals; ``main`` below and the
    ``__main__`` guard already call ``nor_gate``/``main`` by these names.
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the full NOR-gate truth table."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 83 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point of the ``accelerate`` command-line tool.

    Builds the top-level argument parser, registers every subcommand parser,
    and dispatches to the handler the chosen subcommand attached as ``func``.
    Restored from a mangled version that passed an undefined placeholder
    (``A_``) everywhere and whose ``__main__`` guard called an undefined
    ``main``.
    """
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No subcommand given: show help and exit with an error status.
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
"""simple docstring"""
from __future__ import annotations
def snake_case_(matrix: list[list[int]]) -> int:
    """Return the minimal path sum from top-left to bottom-right of *matrix*,
    moving only right or down.

    NOTE: accumulates prefix sums in place, so *matrix* is mutated.

    Bug fixed: the parameter was named ``A_`` while the body read the
    undefined name ``matrix``; the parameter is restored to the name used.
    """
    # preprocessing the first row
    for j in range(1, len(matrix[0])):
        matrix[0][j] += matrix[0][j - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 83 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__lowerCamelCase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__lowerCamelCase : Any = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class UpperCAmelCase ( unittest.TestCase):
    """Unit tests for the ``# Copied from`` checker in ``utils/check_copies.py``.

    Each test formats a candidate class body with black, writes it into a
    scratch copy of the scheduler sources and asserts that
    ``check_copies.is_copy_consistent`` accepts/repairs it as expected.
    """

    def setUp( self ):
        """Create a scratch diffusers tree and point the checker at it."""
        # BUG FIX: the original bound mkdtemp() to a throwaway local, so
        # ``self.diffusers_dir`` (read everywhere below) was never set.
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
        # Redirect the checker to the scratch tree so the repo is untouched.
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )

    def tearDown( self ):
        """Restore the checker's default path and delete the scratch tree."""
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir )

    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        """Write ``class_code`` under ``comment`` and check copy consistency.

        When ``overwrite_result`` is given, the checker is run in overwrite
        mode and the rewritten file must equal the expected text.
        """
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        # BUG FIX: ``black.TargetVersion.PYaa`` is not a valid enum member;
        # the checker formats for Python 3.7.
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , '''new_code.py''' )
        with open(fname , '''w''' , newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , '''r''' ) as f:
                # BUG FIX: ``assertTrue(a, b)`` treats ``b`` as a failure
                # message and never compares — use assertEqual.
                self.assertEqual(f.read() , expected )

    def test_find_code_in_diffusers( self ):
        """The checker must locate the DDPMSchedulerOutput source verbatim."""
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        # BUG FIX: the original compared the retrieved code with itself,
        # which is vacuously true; compare against the reference snippet.
        self.assertEqual(code , REFERENCE_CODE )

    def test_copy_consistency( self ):
        """Exercise consistency, rename, long-name and overwrite scenarios."""
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub('''Bert''' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , REFERENCE_CODE , overwrite_result=re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
| 404 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
# Restore the type variable name used by the generic classes in this module
# (the original TypeVar binding was renamed by an automated rewrite).
T = TypeVar("T")


class __snake_case ( Generic[T]):
    """A single node of a singly linked list: a payload plus a ``next`` link.

    BUG FIX: the original ``__init__`` bound the payload to a throwaway local,
    so ``self.data`` / ``self.next`` (read by ``__str__`` and by the stack
    below) were never initialised.
    """

    def __init__( self , data: T ) -> None:
        self.data = data
        # No successor yet; the stack links nodes via this attribute.
        self.next: Node[T] | None = None

    def __str__( self ) -> str:
        return f'''{self.data}'''


# The stack implementation below instantiates ``Node(...)`` — expose the
# class under that name as well.
Node = __snake_case
class __snake_case ( Generic[T]):
    """LIFO stack backed by a singly linked list (``self.top`` is the head).

    NOTE(review): an automated rename has collapsed several attribute and
    local-variable assignments below into throwaway locals (``_lowerCamelCase``),
    so methods that read ``self.top`` / ``node`` / ``pop_node`` will fail at
    runtime as written — the intended assignments are flagged inline and
    should be confirmed against the original source.
    """

    def __init__( self : int ):
        """Create an empty stack."""
        # NOTE(review): binds a local; presumably this was ``self.top = None``.
        _lowerCamelCase : Node[T] | None = None

    def __iter__( self : str ):
        """Yield the stored values from top to bottom."""
        # NOTE(review): presumably ``node = self.top`` and, in the loop,
        # ``node = node.next`` — confirm.
        _lowerCamelCase : List[str] = self.top
        while node:
            yield node.data
            _lowerCamelCase : Any = node.next

    def __str__( self : int ):
        """Render the stack as ``top->...->bottom``."""
        # NOTE(review): ``str(...)`` is applied to an undefined name; the loop
        # variable ``item`` looks like the intended argument — confirm.
        return "->".join([str(__lowerCAmelCase ) for item in self] )

    def __len__( self : int ):
        """Number of elements currently on the stack (walks the whole list)."""
        return len(tuple(iter(self ) ) )

    def SCREAMING_SNAKE_CASE ( self : int ):
        """True when the stack holds no elements (called as ``is_empty`` below)."""
        return self.top is None

    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : T ):
        """Push a value onto the top of the stack.

        NOTE(review): the two collapsed assignments below presumably were
        ``node.next = self.top`` and ``self.top = node`` — confirm, including
        whether the second one belongs inside the ``if``.
        """
        _lowerCamelCase : Tuple = Node(__lowerCAmelCase )
        if not self.is_empty():
            _lowerCamelCase : Optional[int] = self.top
            _lowerCamelCase : List[str] = node

    def SCREAMING_SNAKE_CASE ( self : str ):
        """Remove and return the top value; raise IndexError when empty.

        NOTE(review): ``pop_node`` is read but never assigned — presumably
        ``pop_node = self.top`` then ``self.top = self.top.next``.
        """
        if self.is_empty():
            raise IndexError('''pop from empty stack''' )
        assert isinstance(self.top , __lowerCAmelCase )
        _lowerCamelCase : Any = self.top
        _lowerCamelCase : Any = self.top.next
        return pop_node.data

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Return (without removing) the top value; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('''peek from empty stack''' )
        assert self.top is not None
        return self.top.data

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Drop every element (presumably ``self.top = None`` — confirm)."""
        _lowerCamelCase : List[str] = None
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 83 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase__ = '''.'''
# BUG FIX: the compliance checker below reads ``REPO_PATH`` and
# ``INTERNAL_OPS``, but both values had been bound to a single rebound
# throwaway name — expose them under the names actually used.
REPO_PATH = lowerCamelCase__
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
lowerCamelCase__ = [
    '''Assert''',
    '''AssignVariableOp''',
    '''EmptyTensorList''',
    '''MergeV2Checkpoints''',
    '''ReadVariableOp''',
    '''ResourceGather''',
    '''RestoreV2''',
    '''SaveV2''',
    '''ShardedFilename''',
    '''StatefulPartitionedCall''',
    '''StaticRegexFullMatch''',
    '''VarHandleOp''',
]
INTERNAL_OPS = lowerCamelCase__
def A(saved_model_path: str, strict: bool, opset: int):
    """Check that every op in a TF SavedModel is convertible to ONNX.

    BUG FIXES: the original signature declared three parameters all named
    ``__a`` (a SyntaxError) while the body read ``saved_model_path`` /
    ``strict`` / ``opset``; several locals had been collapsed into undefined
    names; and the strict-mode ``raise`` concatenated ``str + list``
    (a TypeError) — the list is now joined first.

    :param saved_model_path: path to the SavedModel ``.pb`` file
    :param strict: raise on incompatible ops instead of just printing them
    :param opset: highest ONNX opset to accept ops from
    """
    saved_model = SavedModel()
    onnx_ops = []
    # Load the cumulative list of ONNX-supported ops up to ``opset``.
    with open(os.path.join(REPO_PATH , "utils" , "tf_ops" , "onnx.json" ) ) as f:
        onnx_opsets = json.load(f )['''opsets''']
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path , "rb" ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(
            F"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(F"Found the following incompatible ops for the opset {opset}:" )
        print(*incompatible_ops , sep="\n" )
    else:
        print(F"The saved model {saved_model_path} can properly be converted with ONNX." )


# The __main__ guard below invokes the checker under this name.
onnx_compliancy = A
if __name__ == "__main__":
    # CLI entry point for the ONNX-compliance check.
    # BUG FIX: ``parser`` and ``args`` were read below but the results of
    # ``ArgumentParser()`` / ``parse_args()`` had been bound to a throwaway
    # rebound name — bind them to the names actually used.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
    parser.add_argument(
        '''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
    )
    parser.add_argument(
        '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
    )
    parser.add_argument(
        '''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 122 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# BUG FIX: all of the module-level names below had been collapsed into one
# rebound throwaway identifier, leaving ``PATH_TO_TRANSFORMERS``, ``spec``,
# ``transformers``, ``CONFIG_MAPPING``, ``_re_checkpoint`` and
# ``CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK`` undefined even
# though they are read further down this file.
PATH_TO_TRANSFORMERS = '''src/transformers'''

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    '''transformers''',
    os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# (raw string so the regex escapes are not interpreted as string escapes)
_re_checkpoint = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')

# Config classes exempt from the checkpoint-in-docstring requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    '''CLIPConfigMixin''',
    '''DecisionTransformerConfigMixin''',
    '''EncoderDecoderConfigMixin''',
    '''RagConfigMixin''',
    '''SpeechEncoderDecoderConfigMixin''',
    '''VisionEncoderDecoderConfigMixin''',
    '''VisionTextDualEncoderConfigMixin''',
}
def snake_case_ ( ):
    """Verify every config class docstring contains a valid checkpoint link.

    A checkpoint link is "valid" when the displayed checkpoint name matches
    the ``https://huggingface.co/<name>`` URL it points to. Raises ValueError
    listing the offending config classes (exempt classes excluded).

    BUG FIX: the obfuscated version unpacked the (name, link) tuple into a
    single repeated local and then read undefined names (``ckpt_name``,
    ``ckpt_link``, ``name``, ``message``) — the intended locals are restored.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )

    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )


# The __main__ guard below invokes the checker under this name.
check_config_docstrings_have_checkpoints = snake_case_
if __name__ == "__main__":
    # Script entry point: run the docstring-checkpoint check.
    check_config_docstrings_have_checkpoints()
| 83 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( _lowercase , unittest.TestCase ):
    """Tokenizer tests for CANINE (character-level, no vocab file).

    NOTE(review): an automated rename has collapsed many local/attribute
    assignments into throwaway names (``_lowerCAmelCase``) and replaced most
    argument values with ``__lowerCAmelCase``; several methods therefore read
    names that are never bound as written. Inline notes flag the apparent
    intent — confirm against the original test file before relying on them.
    """

    # NOTE(review): both class attributes are bound to the same mangled name;
    # presumably these were ``tokenizer_class = CanineTokenizer`` and
    # ``test_rust_tokenizer = False`` — confirm.
    _UpperCAmelCase = CanineTokenizer
    _UpperCAmelCase = False

    def __lowerCamelCase ( self ):
        """Create a fresh tokenizer and save it into the test's temp dir."""
        super().setUp()
        # NOTE(review): the tokenizer is bound to a local, yet the next line
        # reads ``tokenizer`` — presumably ``tokenizer = CanineTokenizer()``.
        _lowerCAmelCase : int = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def __lowerCamelCase ( self ):
        """Pretrained CANINE tokenizer shared by the integration tests."""
        return CanineTokenizer.from_pretrained('google/canine-s' )

    def __lowerCamelCase ( self ,**_A ):
        """Reload the tokenizer saved in setUp with a fixed max length."""
        _lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
        # NOTE(review): presumably ``tokenizer.model_max_length = 1024``; the
        # value is currently bound to a throwaway local — confirm.
        _lowerCAmelCase : Optional[int] = 1024
        return tokenizer

    @require_torch
    def __lowerCamelCase ( self ):
        """Batch encoding returns padded PyTorch tensors with expected ids."""
        _lowerCAmelCase : Union[str, Any] = self.canine_tokenizer
        _lowerCAmelCase : List[str] = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
        # fmt: off
        _lowerCAmelCase : Tuple = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
        # fmt: on
        _lowerCAmelCase : str = tokenizer(__lowerCAmelCase ,padding=__lowerCAmelCase ,return_tensors='pt' )
        self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
        _lowerCAmelCase : Tuple = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
        self.assertEqual((2, 39) ,batch.input_ids.shape )
        self.assertEqual((2, 39) ,batch.attention_mask.shape )

    @require_torch
    def __lowerCamelCase ( self ):
        """Encoding returns input_ids, attention_mask and token_type_ids."""
        _lowerCAmelCase : Any = self.canine_tokenizer
        _lowerCAmelCase : Optional[Any] = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.''']
        _lowerCAmelCase : Tuple = tokenizer(__lowerCAmelCase ,padding=__lowerCAmelCase ,return_tensors='pt' )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn('input_ids' ,__lowerCAmelCase )
        self.assertIn('attention_mask' ,__lowerCAmelCase )
        self.assertIn('token_type_ids' ,__lowerCAmelCase )

    @require_torch
    def __lowerCamelCase ( self ):
        """Target texts are encoded to the requested max_length."""
        _lowerCAmelCase : Any = self.canine_tokenizer
        _lowerCAmelCase : Any = [
            '''What\'s the weater?''',
            '''It\'s about 25 degrees.''',
        ]
        _lowerCAmelCase : int = tokenizer(
            text_target=__lowerCAmelCase ,max_length=32 ,padding='max_length' ,truncation=__lowerCAmelCase ,return_tensors='pt' )
        self.assertEqual(32 ,targets['input_ids'].shape[1] )

    def __lowerCamelCase ( self ):
        """Saving and reloading a tokenizer preserves its encodings and config."""
        _lowerCAmelCase : List[Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length ,42 )
        # Now let's start the test
        _lowerCAmelCase : Optional[Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
                _lowerCAmelCase : List[Any] = ''' He is very happy, UNwant\u00E9d,running'''
                _lowerCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
                tokenizer.save_pretrained(__lowerCAmelCase )
                _lowerCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
                _lowerCAmelCase : Optional[Any] = after_tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
                self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
                shutil.rmtree(__lowerCAmelCase )
        _lowerCAmelCase : List[Any] = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                _lowerCAmelCase : Any = tempfile.mkdtemp()
                _lowerCAmelCase : Dict = ''' He is very happy, UNwant\u00E9d,running'''
                _lowerCAmelCase : int = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                _lowerCAmelCase : Optional[Any] = chr(0xE007 )
                additional_special_tokens.append(__lowerCAmelCase )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                _lowerCAmelCase : Tuple = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
                tokenizer.save_pretrained(__lowerCAmelCase )
                _lowerCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
                _lowerCAmelCase : int = after_tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
                self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
                self.assertIn(__lowerCAmelCase ,after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length ,42 )
                _lowerCAmelCase : Any = tokenizer.__class__.from_pretrained(__lowerCAmelCase ,model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length ,43 )
                shutil.rmtree(__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """A newly added cls special token encodes to one id and round-trips."""
        _lowerCAmelCase : str = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                _lowerCAmelCase : Optional[Any] = self.get_clean_sequence(__lowerCAmelCase )
                # a special token for Canine can be defined as follows:
                _lowerCAmelCase : str = 0xE005
                _lowerCAmelCase : List[str] = chr(__lowerCAmelCase )
                tokenizer.add_special_tokens({'cls_token': special_token} )
                _lowerCAmelCase : List[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
                self.assertEqual(len(__lowerCAmelCase ) ,1 )
                _lowerCAmelCase : int = tokenizer.decode(ids + encoded_special_token ,clean_up_tokenization_spaces=__lowerCAmelCase )
                _lowerCAmelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
                _lowerCAmelCase : Optional[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
                _lowerCAmelCase : List[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
                self.assertEqual(__lowerCAmelCase ,input_encoded + special_token_id )
                _lowerCAmelCase : int = tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase )
                self.assertTrue(special_token not in decoded )

    def __lowerCamelCase ( self ):
        """Tokens added via add_tokens and add_special_tokens both tokenize whole."""
        _lowerCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                _lowerCAmelCase : Optional[int] = chr(0xE005 )
                _lowerCAmelCase : str = chr(0xE006 )
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1] ,special_tokens=__lowerCAmelCase )
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
                _lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(__lowerCAmelCase )
                _lowerCAmelCase : List[str] = tokenizer.tokenize(__lowerCAmelCase )
                self.assertEqual(len(__lowerCAmelCase ) ,1 )
                self.assertEqual(len(__lowerCAmelCase ) ,1 )
                self.assertEqual(token_a[0] ,__lowerCAmelCase )
                self.assertEqual(token_a[0] ,__lowerCAmelCase )

    @require_tokenizers
    def __lowerCamelCase ( self ):
        """A tokenizer with an AddedToken special token saves and reloads."""
        _lowerCAmelCase : Tuple = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # a special token for Canine can be defined as follows:
                _lowerCAmelCase : int = 0xE006
                _lowerCAmelCase : Union[str, Any] = chr(__lowerCAmelCase )
                _lowerCAmelCase : Any = AddedToken(__lowerCAmelCase ,lstrip=__lowerCAmelCase )
                tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(__lowerCAmelCase )
                    tokenizer.from_pretrained(__lowerCAmelCase )

    def __lowerCamelCase ( self ):
        """additional_special_tokens in the saved JSON files are honoured and
        can be overridden from from_pretrained."""
        _lowerCAmelCase : Any = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__lowerCAmelCase )
                with open(os.path.join(__lowerCAmelCase ,'special_tokens_map.json' ) ,encoding='utf-8' ) as json_file:
                    _lowerCAmelCase : Any = json.load(__lowerCAmelCase )
                with open(os.path.join(__lowerCAmelCase ,'tokenizer_config.json' ) ,encoding='utf-8' ) as json_file:
                    _lowerCAmelCase : Optional[int] = json.load(__lowerCAmelCase )
                # a special token for Canine can be defined as follows:
                _lowerCAmelCase : Optional[int] = 0xE006
                _lowerCAmelCase : int = chr(__lowerCAmelCase )
                _lowerCAmelCase : Dict = [new_token_a]
                _lowerCAmelCase : str = [new_token_a]
                with open(os.path.join(__lowerCAmelCase ,'special_tokens_map.json' ) ,'w' ,encoding='utf-8' ) as outfile:
                    json.dump(__lowerCAmelCase ,__lowerCAmelCase )
                with open(os.path.join(__lowerCAmelCase ,'tokenizer_config.json' ) ,'w' ,encoding='utf-8' ) as outfile:
                    json.dump(__lowerCAmelCase ,__lowerCAmelCase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                _lowerCAmelCase : str = tokenizer_class.from_pretrained(__lowerCAmelCase ,extra_ids=0 )
                self.assertIn(__lowerCAmelCase ,tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) ,)
                _lowerCAmelCase : Any = 0xE007
                _lowerCAmelCase : Any = chr(__lowerCAmelCase )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                _lowerCAmelCase : Tuple = [AddedToken(__lowerCAmelCase ,lstrip=__lowerCAmelCase )]
                _lowerCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
                    __lowerCAmelCase ,additional_special_tokens=__lowerCAmelCase ,extra_ids=0 )
                self.assertIn(__lowerCAmelCase ,tokenizer.additional_special_tokens )
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] ,tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )

    @require_tokenizers
    def __lowerCamelCase ( self ):
        """Decoding with special tokens matches the expected rendered string."""
        _lowerCAmelCase : int = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                _lowerCAmelCase : List[Any] = '''hello world'''
                if self.space_between_special_tokens:
                    _lowerCAmelCase : int = '''[CLS] hello world [SEP]'''
                else:
                    _lowerCAmelCase : str = input
                _lowerCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
                _lowerCAmelCase : Optional[Any] = tokenizer.decode(__lowerCAmelCase ,spaces_between_special_tokens=self.space_between_special_tokens )
                self.assertIn(__lowerCAmelCase ,[output, output.lower()] )

    def __lowerCamelCase ( self ):
        """The *_token / *_token_id properties stay consistent when set by id."""
        _lowerCAmelCase : int = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                _lowerCAmelCase : Dict = [
                    '''bos_token''',
                    '''eos_token''',
                    '''unk_token''',
                    '''sep_token''',
                    '''pad_token''',
                    '''cls_token''',
                    '''mask_token''',
                ]
                _lowerCAmelCase : Any = '''a'''
                _lowerCAmelCase : str = ord(__lowerCAmelCase )
                for attr in attributes_list:
                    setattr(__lowerCAmelCase ,attr + '_id' ,__lowerCAmelCase )
                    self.assertEqual(getattr(__lowerCAmelCase ,__lowerCAmelCase ) ,__lowerCAmelCase )
                    self.assertEqual(getattr(__lowerCAmelCase ,attr + '_id' ) ,__lowerCAmelCase )
                    setattr(__lowerCAmelCase ,attr + '_id' ,__lowerCAmelCase )
                    self.assertEqual(getattr(__lowerCAmelCase ,__lowerCAmelCase ) ,__lowerCAmelCase )
                    self.assertEqual(getattr(__lowerCAmelCase ,attr + '_id' ) ,__lowerCAmelCase )
                setattr(__lowerCAmelCase ,'additional_special_tokens_ids' ,[] )
                self.assertListEqual(getattr(__lowerCAmelCase ,'additional_special_tokens' ) ,[] )
                self.assertListEqual(getattr(__lowerCAmelCase ,'additional_special_tokens_ids' ) ,[] )
                _lowerCAmelCase : Optional[int] = 0xE006
                _lowerCAmelCase : Dict = chr(__lowerCAmelCase )
                setattr(__lowerCAmelCase ,'additional_special_tokens_ids' ,[additional_special_token_id] )
                self.assertListEqual(getattr(__lowerCAmelCase ,'additional_special_tokens' ) ,[additional_special_token] )
                self.assertListEqual(getattr(__lowerCAmelCase ,'additional_special_tokens_ids' ) ,[additional_special_token_id] )

    # The remaining common-test hooks do not apply to CANINE (it has no vocab
    # file), so they are overridden as no-ops.
    def __lowerCamelCase ( self ):
        """No-op override of a common tokenizer test (not applicable here)."""
        pass

    def __lowerCamelCase ( self ):
        """No-op override of a common tokenizer test (not applicable here)."""
        pass

    def __lowerCamelCase ( self ):
        """No-op override of a common tokenizer test (not applicable here)."""
        pass

    def __lowerCamelCase ( self ):
        """No-op override of a common tokenizer test (not applicable here)."""
        pass

    def __lowerCamelCase ( self ):
        """No-op override of a common tokenizer test (not applicable here)."""
        pass

    def __lowerCamelCase ( self ):
        """No-op override of a common tokenizer test (not applicable here)."""
        pass

    def __lowerCamelCase ( self ):
        """No-op override of a common tokenizer test (not applicable here)."""
        pass

    def __lowerCamelCase ( self ):
        """No-op override of a common tokenizer test (not applicable here)."""
        pass
| 259 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class __snake_case ( unittest.TestCase):
    """Intentionally empty placeholder test case (fast-test slot)."""

    pass
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase):
    """Nightly GPU integration tests for VersatileDiffusionTextToImagePipeline.

    NOTE(review): several locals below have been collapsed into throwaway
    names by an automated rewrite, so some later reads (``generator``,
    ``image``/``new_image``) reference names that are never bound as written
    — the apparent intent is flagged inline; confirm against the original.
    """

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE ( self : int ):
        """Saving and reloading the pipeline reproduces the same images."""
        _lowerCamelCase : int = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : str = '''A painting of a squirrel eating a burger '''
        # NOTE(review): presumably ``generator = torch.manual_seed(0)`` — the
        # reload path below calls ``generator.manual_seed(0)``.
        _lowerCamelCase : Dict = torch.manual_seed(0 )
        _lowerCamelCase : List[Any] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__lowerCAmelCase )
            _lowerCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCAmelCase )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : int = generator.manual_seed(0 )
        _lowerCamelCase : List[str] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        # Identical seeds must give identical outputs before/after reload.
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """fp16 text-to-image output matches a stored reference slice."""
        _lowerCamelCase : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        _lowerCamelCase : int = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
        # Compare a fixed 3x3x1 corner slice against the reference values.
        _lowerCamelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        _lowerCamelCase : Dict = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 83 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCamelCase :
    """Builds small AST (Audio Spectrogram Transformer) configs and dummy
    inputs for the model tests below.

    BUG FIXES: the obfuscated version declared every ``__init__`` parameter
    with the same name (a SyntaxError), bound all attributes to a throwaway
    local instead of ``self``, and gave all four methods one shared name so
    only the last survived — the method names are restored from their call
    sites (``self.prepare_config_and_inputs``, ``self.get_config``, and the
    test class's ``create_and_check_model`` / ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        patch_size=2,
        max_length=2_4,
        num_mel_bins=1_6,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, input_values, labels) with random dummy tensors."""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        """Build an ASTConfig from the tester's hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )

    def create_and_check_model(self, config, input_values, labels):
        """Run a forward pass and check the hidden-state output shape."""
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


# The test class below instantiates ``ASTModelTester(self)`` — expose the
# tester under that name before it is shadowed by the next class definition.
ASTModelTester = UpperCamelCase
@require_torch
class UpperCamelCase ( _lowercase , _lowercase , unittest.TestCase ):
    """Model tests for AST (Audio Spectrogram Transformer).

    NOTE(review): the four class attributes below are all bound to the same
    (mangled) name, and several locals were collapsed by an automated rewrite
    so later lines read names that are never bound as written — the apparent
    intent is flagged inline; confirm against the original test file.
    """

    # NOTE(review): presumably all_model_classes, pipeline_model_mapping and
    # the boolean test_* switches — confirm.
    __UpperCamelCase =(
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    __UpperCamelCase =(
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    __UpperCamelCase =False
    __UpperCamelCase =False
    __UpperCamelCase =False
    __UpperCamelCase =False

    # NOTE(review): duplicate parameter names here are a SyntaxError as
    # written — the pipeline-skip hook's original parameters were collapsed.
    def UpperCamelCase ( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
        """Skip hook: only the audio-classification pipeline test is run."""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def UpperCamelCase ( self : Any ):
        """Set up the model tester and config tester."""
        SCREAMING_SNAKE_CASE = ASTModelTester(self )
        SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )

    def UpperCamelCase ( self : str ):
        """Run the common config tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='AST does not use inputs_embeds' )
    def UpperCamelCase ( self : Optional[int] ):
        """Skipped: AST has no inputs_embeds."""
        pass

    def UpperCamelCase ( self : Any ):
        """Input embeddings exist; output embeddings are None or Linear."""
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE = model_class(__lowerCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            SCREAMING_SNAKE_CASE = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )

    def UpperCamelCase ( self : Tuple ):
        """forward() takes input_values as its first argument."""
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE = model_class(__lowerCAmelCase )
            SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE = ['''input_values''']
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    def UpperCamelCase ( self : Optional[Any] ):
        """Run the tester's model-shape check."""
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    @slow
    def UpperCamelCase ( self : Optional[Any] ):
        """Pretrained checkpoints load successfully."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE = ASTModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def __lowerCAmelCase ( ):
    """Download a sample FLAC clip from the hub and load it with torchaudio.

    Returns:
        ``(audio, sampling_rate)`` as produced by :func:`torchaudio.load`.

    Fix: the original discarded the downloaded filepath, passed an undefined
    name to ``torchaudio.load`` and returned two unbound names; the wrong
    ``-> str`` annotation is dropped as well.
    """
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCamelCase ( unittest.TestCase ):
    """Integration test: run a finetuned AST audio classifier on a real clip.

    NOTE(review): mangled excerpt — every local is assigned to
    ``SCREAMING_SNAKE_CASE`` and call arguments were rewritten to
    ``__lowerCAmelCase``; it cannot run as written.
    """
    @cached_property
    def UpperCamelCase ( self : Tuple ):
        """Return the AudioSet-finetuned feature extractor (or None without torchaudio)."""
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
            if is_torchaudio_available()
            else None
        )
    @slow
    def UpperCamelCase ( self : Optional[Any] ):
        """Forward a sample clip through the classifier and pin the first logits."""
        SCREAMING_SNAKE_CASE = self.default_feature_extractor
        SCREAMING_SNAKE_CASE = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(__lowerCAmelCase )
        SCREAMING_SNAKE_CASE = self.default_feature_extractor
        SCREAMING_SNAKE_CASE = prepare_audio()
        SCREAMING_SNAKE_CASE = audio.squeeze().numpy()
        SCREAMING_SNAKE_CASE = feature_extractor(__lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE = model(**__lowerCAmelCase )
        # verify the logits
        SCREAMING_SNAKE_CASE = torch.Size((1, 5_2_7) )
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
        SCREAMING_SNAKE_CASE = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 439 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowerCAmelCase__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ = ort.RunOptions()
lowerCAmelCase__ = 128
lowerCAmelCase__ = 1
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = 2000
lowerCAmelCase__ = {}
for iter in range(max_iters):
lowerCAmelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger; immediately clobbered by the next (mangled) assignment to the
# same placeholder name.
_snake_case = logging.get_logger(__name__)
# Map of CamemBERT checkpoint names to their hosted config.json URLs.
_snake_case = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class UpperCAmelCase_ ( _lowercase ):
    """CamemBERT (RoBERTa-style) model configuration.

    Fixes: the original declared every ``__init__`` parameter as ``__A``
    (duplicate parameter names — a SyntaxError) while the body read the real
    names, and the class-level ``Optional[Any]`` annotation referenced an
    unimported name. Parameter names/defaults are restored to match the
    assignments the body performs.
    """

    # Model-type identifier (original attribute name kept).
    __A = "camembert"

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """Store hyper-parameters; special-token ids go to the base config."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCAmelCase_ ( _lowercase ):
    """ONNX export configuration: declares the dynamic input axes.

    Fix: the original assigned the axis mapping to a throwaway name and then
    read an unbound ``dynamic_axis`` (NameError).
    """

    @property
    def _snake_case ( self ):
        """Return an OrderedDict mapping input names to their dynamic axes.

        Multiple-choice tasks carry an extra ``choice`` axis between batch
        and sequence.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 340 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def snake_case_ (x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of point ``(x, y)`` under the
    Mandelbrot iteration ``z -> z**2 + c``.

    Args:
        x, y: real/imaginary parts of the sampled point ``c``.
        max_step: maximum number of iterations.

    Returns:
        ``step / (max_step - 1)`` — 1.0 for points that never diverge within
        ``max_step`` steps, smaller values for faster divergence.

    Fix: the original declared three parameters all named ``A_`` (SyntaxError)
    and read ``a``/``b``/``max_step`` that were never bound.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def snake_case_ ( A_ : float ):
    """Black-and-white coloring: black for non-divergent points (distance 1),
    white otherwise.

    Fix: the original read an unbound name ``distance`` instead of the
    parameter.
    """
    if A_ == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def snake_case_ ( A_ : float ):
    """Distance-based coloring: black for non-divergent points (distance 1),
    otherwise an RGB tuple from the HSV hue wheel at hue ``A_``.

    Fix: the original read an unbound name ``distance`` instead of the
    parameter.
    """
    if A_ == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(A_, 1, 1 ) )
def snake_case_ (
    image_width: int = 8_00,
    image_height: int = 6_00,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set into a PIL image.

    Fixes: the original declared seven parameters all named ``A_``
    (SyntaxError) and read names (``pixels``, ``figure_height``, ``distance``,
    ...) that were never bound; the loop-invariant figure height is also
    hoisted out of the pixel loops.

    NOTE(review): ``get_distance`` / ``get_color_coded_rgb`` /
    ``get_black_and_white_rgb`` are the helpers defined above this function in
    the original file (here mangled to ``snake_case_``) — confirm the names
    before running.
    """
    img = Image.new('''RGB''', (image_width, image_height) )
    pixels = img.load()
    # figure height preserving the image's aspect ratio (loop-invariant)
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCAmelCase__ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 83 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# Module logger; immediately clobbered by the next (mangled) assignment, which
# was presumably the TARGET_FEATURE_LENGTH constant read by the class below.
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)  # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ = 2_5_6
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
A__ : Optional[Any] = ["melgan"]
def __init__( self : List[Any] , _snake_case : SpectrogramNotesEncoder , _snake_case : SpectrogramContEncoder , _snake_case : TaFilmDecoder , _snake_case : DDPMScheduler , _snake_case : OnnxRuntimeModel if is_onnx_available() else Any , ):
"""simple docstring"""
super().__init__()
# From MELGAN
A__ = math.log(1E-5 ) # Matches MelGAN training.
A__ = 4.0 # Largest value for most examples
A__ = 1_28
self.register_modules(
notes_encoder=__lowerCAmelCase , continuous_encoder=__lowerCAmelCase , decoder=__lowerCAmelCase , scheduler=__lowerCAmelCase , melgan=__lowerCAmelCase , )
def _a ( self : List[str] , _snake_case : Union[str, Any] , _snake_case : List[Any]=(-1.0, 1.0) , _snake_case : Union[str, Any]=False ):
"""simple docstring"""
A__ = output_range
if clip:
A__ = torch.clip(__lowerCAmelCase , self.min_value , self.max_value )
# Scale to [0, 1].
A__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _a ( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : Optional[Any]=(-1.0, 1.0) , _snake_case : Optional[int]=False ):
"""simple docstring"""
A__ = input_range
A__ = torch.clip(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if clip else outputs
# Scale to [0, 1].
A__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _a ( self : List[Any] , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = input_tokens > 0
A__ = self.notes_encoder(
encoder_input_tokens=__lowerCAmelCase , encoder_inputs_mask=__lowerCAmelCase )
A__ = self.continuous_encoder(
encoder_inputs=__lowerCAmelCase , encoder_inputs_mask=__lowerCAmelCase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _a ( self : Any , _snake_case : Dict , _snake_case : List[str] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = noise_time
if not torch.is_tensor(__lowerCAmelCase ):
A__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(__lowerCAmelCase ) and len(timesteps.shape ) == 0:
A__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
A__ = self.decoder(
encodings_and_masks=__lowerCAmelCase , decoder_input_tokens=__lowerCAmelCase , decoder_noise_time=__lowerCAmelCase )
return logits
@torch.no_grad()
def __call__( self : str , _snake_case : List[List[int]] , _snake_case : Optional[torch.Generator] = None , _snake_case : int = 1_00 , _snake_case : bool = True , _snake_case : str = "numpy" , _snake_case : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _snake_case : int = 1 , ):
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(__lowerCAmelCase )}.''' )
A__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
A__ = np.zeros([1, 0, self.n_dims] , np.floataa )
A__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCAmelCase , device=self.device )
for i, encoder_input_tokens in enumerate(__lowerCAmelCase ):
if i == 0:
A__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
A__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCAmelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
A__ = ones
A__ = self.scale_features(
__lowerCAmelCase , output_range=[-1.0, 1.0] , clip=__lowerCAmelCase )
A__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__lowerCAmelCase , continuous_mask=__lowerCAmelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
A__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__lowerCAmelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__lowerCAmelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
A__ = self.decode(
encodings_and_masks=__lowerCAmelCase , input_tokens=__lowerCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
A__ = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
A__ = self.scale_to_features(__lowerCAmelCase , input_range=[-1.0, 1.0] )
A__ = mel[:1]
A__ = mel.cpu().float().numpy()
A__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCAmelCase , __lowerCAmelCase )
logger.info('Generated segment' , __lowerCAmelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
A__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
A__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__lowerCAmelCase )
| 9 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def snake_case_ (tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single ``line`` padded/truncated to ``max_length``.

    Fixes: the original declared six parameters all named ``A_``
    (SyntaxError) and read unbound names; the extra-kwargs dict and the
    padding-side assignment were also discarded.

    NOTE(review): ``BartTokenizer`` comes from the file-level transformers
    import; the ``add_prefix_space`` kwarg is applied only for BART-style
    tokenizers when the line has no leading space.
    """
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer, BartTokenizer ) and not line.startswith(''' ''' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding='''max_length''' if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def snake_case_ (input_ids, pad_token_id, attention_mask=None, ):
    """Drop columns that are padding in every row of a batch.

    Args:
        input_ids: 2-D token-id tensor (batch, seq).
        pad_token_id: id whose all-pad columns are removed.
        attention_mask: optional mask trimmed with the same columns.

    Fixes: the original declared duplicate ``A_`` parameters (SyntaxError)
    and read an unbound ``keep_column_mask``.
    """
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case ( _lowercase):
    """Seq2seq line-pair dataset: reads ``<type_path>.source``/``.target`` files
    lazily with linecache and tokenizes pairs on access.

    NOTE(review): mangled excerpt — locals are assigned to ``_lowerCamelCase``
    while later statements read the intended names, call arguments were
    rewritten to ``__lowerCAmelCase``, and ``__init__`` reuses one parameter
    name; it cannot run as written.
    """
    def __init__( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple="train" , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Union[str, Any]="" , ):
        """Record file paths, per-line lengths, tokenizer and length limits."""
        super().__init__()
        _lowerCamelCase : Optional[int] = Path(__lowerCAmelCase ).joinpath(type_path + '''.source''' )
        _lowerCamelCase : List[str] = Path(__lowerCAmelCase ).joinpath(type_path + '''.target''' )
        _lowerCamelCase : List[Any] = self.get_char_lens(self.src_file )
        _lowerCamelCase : Optional[int] = max_source_length
        _lowerCamelCase : Optional[Any] = max_target_length
        assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
        _lowerCamelCase : List[Any] = tokenizer
        _lowerCamelCase : List[Any] = prefix
        if n_obs is not None:
            _lowerCamelCase : List[str] = self.src_lens[:n_obs]
        _lowerCamelCase : int = src_lang
        _lowerCamelCase : Union[str, Any] = tgt_lang
    def __len__( self : int ):
        """Number of source lines (possibly truncated to n_obs)."""
        return len(self.src_lens )
    def __getitem__( self : Dict , __lowerCAmelCase : Optional[Any] ):
        """Tokenize the index-th source/target pair (linecache is 1-based)."""
        _lowerCamelCase : str = index + 1  # linecache starts at 1
        _lowerCamelCase : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) , __lowerCAmelCase ).rstrip('''\n''' )
        _lowerCamelCase : Optional[Any] = linecache.getline(str(self.tgt_file ) , __lowerCAmelCase ).rstrip('''\n''' )
        assert source_line, f'''empty source line for index {index}'''
        assert tgt_line, f'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , __lowerCAmelCase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        _lowerCamelCase : Optional[int] = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        )
        _lowerCamelCase : Union[str, Any] = self.tokenizer.generator if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        _lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_source_length , '''right''' )
        _lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_target_length , '''right''' )
        _lowerCamelCase : Optional[Any] = source_inputs['''input_ids'''].squeeze()
        _lowerCamelCase : Union[str, Any] = target_inputs['''input_ids'''].squeeze()
        _lowerCamelCase : Any = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : str ):
        """Per-line character lengths of a file (used for length filtering)."""
        return [len(__lowerCAmelCase ) for x in Path(__lowerCAmelCase ).open().readlines()]
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Any ):
        """Collate a list of examples: stack tensors and trim shared padding."""
        _lowerCamelCase : List[Any] = torch.stack([x['''input_ids'''] for x in batch] )
        _lowerCamelCase : Tuple = torch.stack([x['''attention_mask'''] for x in batch] )
        _lowerCamelCase : Union[str, Any] = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        _lowerCamelCase : Tuple = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        _lowerCamelCase : Tuple = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        _lowerCamelCase : Union[str, Any] = trim_batch(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase , _lowerCamelCase : List[str] = trim_batch(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
lowerCAmelCase__ = getLogger(__name__)
def snake_case_ ( A_ : List[List] ):
    """Collapse one level of nesting: a list of lists becomes one flat list."""
    return [element for sublist in A_ for element in sublist]
def snake_case_ ( A_ : str ):
    """Save the current git commit/branch/host info as ``git_log.json`` in
    folder ``A_``.

    Fix: the original discarded the ``get_git_info()`` result and passed the
    folder path itself as the JSON payload.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(A_, '''git_log.json''' ) )
def snake_case_ (content, path, indent=4, **json_dump_kwargs):
    """Serialize ``content`` as JSON to ``path``.

    Fix: the original declared duplicate ``A_`` parameters (SyntaxError) and
    read unbound names in the body.
    """
    with open(path, '''w''' ) as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs )
def snake_case_ ( A_ : Any ):
    """Return the JSON-decoded contents of the file at path ``A_``."""
    with open(A_ ) as json_file:
        parsed = json.load(json_file )
    return parsed
def snake_case_ ( ):
    """Collect repo path, commit sha, branch and hostname for the enclosing
    git checkout.

    Fixes: ``search_parent_directories`` and ``repo_id`` used unbound
    placeholder names in the original.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def snake_case_ (fn: Callable, it: Iterable):
    """``list(map(fn, it))`` — eager map.

    Fix: the original declared both parameters as ``A_`` (SyntaxError).
    """
    return list(map(fn, it ) )
def snake_case_ (obj, path):
    """Pickle ``obj`` to the file at ``path``.

    Fix: the original declared both parameters as ``A_`` (SyntaxError).
    """
    with open(path, '''wb''' ) as f:
        return pickle.dump(obj, f )
def snake_case_ ( A_ ):
    """SQuAD-style answer normalization: lowercase, strip punctuation,
    drop articles (a/an/the) and collapse whitespace.

    Fix: the inner helpers read ``text``/``exclude`` that were never bound
    in the original.
    """
    def remove_articles(text ):
        return re.sub(R'''\b(a|an|the)\b''', ''' ''', text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(A_ ) ) ) )
def snake_case_ (prediction, ground_truth):
    """Token-level F1 between a prediction and a gold answer (SQuAD metric).

    Returns 0 when no tokens overlap.

    Fix: the original declared both parameters as ``A_`` (SyntaxError) and
    read unbound locals.
    """
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def snake_case_ (prediction, ground_truth):
    """True iff prediction equals the gold answer after SQuAD normalization.

    Fix: the original declared both parameters as ``A_`` (SyntaxError).
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def snake_case_ (output_lns, reference_lns):
    """Average exact-match score over paired prediction/reference lines.

    Returns ``{"em": score}``; 0 for empty inputs.

    Fix: the original declared both parameters as ``A_`` (SyntaxError).
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns, reference_lns ):
        em += exact_match_score(hypo, pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def snake_case_ ( A_ ):
    """True iff the model-prefix string names a RAG model.

    Fix: the original read an unbound ``model_prefix`` instead of the
    parameter.
    """
    return A_.startswith('''rag''' )
def snake_case_ (extra_params, hparams, config):
    """Move each truthy attribute in ``extra_params`` from ``hparams`` onto
    ``config``, honoring T5's ``dropout`` -> ``dropout_rate`` rename; params
    missing from the config are logged and dropped.

    Returns the mutated ``(hparams, config)`` pair.

    Fixes: the original declared all three parameters as ``A_``
    (SyntaxError) and discarded the ``dropout`` -> ``dropout_rate`` mapping.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams, p, None ):
            if not hasattr(config, p ) and not hasattr(config, equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams, p )
                continue
            set_p = p if hasattr(config, p ) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p ) )
            delattr(hparams, p )
    return hparams, config
| 83 | 0 |
import torch
from transformers import AutoModel
class __magic_name__ (torch.nn.Module ):
    '''FSNER few-shot NER model: a BERT encoder plus cosine-similarity based
    start/end span scoring against support examples.

    NOTE(review): mangled excerpt — locals are assigned to ``snake_case__``
    while later statements read the intended names (``T``, ``W_supports``,
    ``support_sizes``, ``S``, ``q`` ...), and call arguments were rewritten to
    ``__lowerCAmelCase``; it cannot run as written.
    '''
    def __init__( self:Any , _a:Any="sayef/fsner-bert-base-uncased" ):
        # Load the backbone encoder and the similarity/softmax heads.
        super(__lowerCAmelCase , self ).__init__()
        snake_case__ = AutoModel.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
        snake_case__ = torch.nn.CosineSimilarity(3 , 1e-08 )
        snake_case__ = torch.nn.Softmax(dim=1 )
    def SCREAMING_SNAKE_CASE__ ( self:str , **_a:int ):
        # Encode a batch with BERT and return the last hidden states.
        return self.bert(**__lowerCAmelCase ).last_hidden_state
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Tuple ):
        # Sum token embeddings along the hidden axis (keeping dims).
        return token_embeddings.sum(2 , keepdim=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Any , _a:Dict , _a:List[str]=1 ):
        # Temperature-scaled softmax over cosine similarities.
        return self.softmax(T * self.cos(__lowerCAmelCase , __lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:List[Any] , _a:Any ):
        # Score start/end token positions of the query against each support set.
        snake_case__ = W_supports['''sizes'''].tolist()
        snake_case__ = W_supports['''start_token_id'''].item()
        snake_case__ = W_supports['''end_token_id'''].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        snake_case__ = self.BERT(**__lowerCAmelCase )
        snake_case__ = self.BERT(**__lowerCAmelCase )
        snake_case__ = None
        snake_case__ = None
        snake_case__ = W_supports['''input_ids'''] == start_token_id
        snake_case__ = W_supports['''input_ids'''] == end_token_id
        for i, size in enumerate(__lowerCAmelCase ):
            # Slice this query's support block out of the flat support batch.
            if i == 0:
                snake_case__ = 0
            else:
                snake_case__ = support_sizes[i - 1]
            snake_case__ = S[s : s + size][start_token_masks[s : s + size]]
            snake_case__ = S[s : s + size][end_token_masks[s : s + size]]
            snake_case__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            snake_case__ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                snake_case__ = torch.vstack((p_starts, p_start) )
                snake_case__ = torch.vstack((p_ends, p_end) )
            else:
                snake_case__ = p_start
                snake_case__ = p_end
        return p_starts, p_ends
| 33 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger; immediately clobbered by the next (mangled) assignment to the
# same placeholder name.
lowerCAmelCase__ = logging.get_logger(__name__)
# Map of CamemBERT checkpoint names to their hosted config.json URLs.
lowerCAmelCase__ = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class __snake_case ( _lowercase):
    """CamemBERT (RoBERTa-style) model configuration.

    Fix: the original declared every ``__init__`` parameter as
    ``__lowerCAmelCase`` (duplicate parameter names — a SyntaxError) while the
    body read the real names. Parameter names/defaults are restored to match
    the assignments the body performs.
    """

    # Model-type identifier (original attribute name kept).
    snake_case__ : Optional[Any] = "camembert"

    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """Store hyper-parameters; special-token ids go to the base config."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( _lowercase):
    """ONNX export configuration: declares the dynamic input axes.

    Fix: the original assigned the axis mapping to a throwaway name and then
    read an unbound ``dynamic_axis`` (NameError).
    """

    @property
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Return an OrderedDict mapping input names to their dynamic axes.

        Multiple-choice tasks carry an extra ``choice`` axis between batch
        and sequence.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 83 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
# Module logger for the conversion script.
lowerCAmelCase__: Any = logging.get_logger(__name__)
# fairseq/unilm parameter-name fragments -> HF WavLM module paths.
# A "*" in the value is replaced with the encoder-layer index at load time.
lowerCAmelCase__: Optional[Any] = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# NOTE(review): presumably the "top-level keys" list of the original script
# (targets set directly on the model rather than through a layer prefix).
lowerCAmelCase__: str = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def __SCREAMING_SNAKE_CASE (hf_pointer, key, value, full_name, weight_type) -> Tuple:
    """Walk ``hf_pointer`` along the dotted ``key`` and copy ``value`` into the
    attribute named by ``weight_type`` (or the pointer's own ``.data``).

    Fixes: the original declared five parameters all named
    ``SCREAMING_SNAKE_CASE`` (SyntaxError) and read unbound names throughout.
    """
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    # Shape-check against the destination before overwriting.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __SCREAMING_SNAKE_CASE (fairseq_model, hf_model) -> int:
    """Copy every weight of a fairseq WavLM checkpoint into the HF model,
    routing conv-feature-extractor weights through ``load_conv_layer`` and
    everything else through the MAPPING table.

    Fixes: the original declared both parameters as ``SCREAMING_SNAKE_CASE``
    (SyntaxError) and read unbound names throughout.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # layer index sits two dots before the matched fragment
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    ``full_name`` looks like ``...conv_layers.<layer_id>.<type_id>.<param>``:
    type_id 0 addresses the convolution itself, type_id 2 the (group/layer)
    norm. Anything else is recorded in ``unused_weights``.

    Args:
        full_name: original fairseq parameter name.
        value: tensor to copy (shape must match the destination).
        feature_extractor: ``hf_model.feature_extractor`` (mutated in place).
        unused_weights: list collecting names that could not be placed.
        use_group_norm: True when the HF config uses group norm, in which case
            only layer 0 carries norm weights.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Norm params exist on every layer without group norm, or only on layer 0 with it.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert a fairseq WavLM checkpoint into HuggingFace format.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted HF model.
        config_path: optional path to an HF ``config.json``; defaults are used
            when omitted.
    """
    # Rebuild the source model from the fairseq checkpoint.
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    # Target HF configuration: explicit config file if given, defaults otherwise.
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert a fairseq WavLM checkpoint to HF format.
    # (The parser/args results were previously bound to a mangled name while
    # the code read `parser`/`args`, which raised NameError.)
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 345 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Banner inserted above any converted line that still mentions a TF-specific
# identifier a human must review by hand. (These four constants were all
# bound to one mangled name while the converter class reads them under the
# names used below.)
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
# TF/TFDS identifiers with no direct `datasets` equivalent.
TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]
# (regex pattern, replacement) pairs applied in order to every source line.
TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value(\'\1\')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value(\'string\')"),
    (r"tfds\.features\.Text\(", r"datasets.Value(\'string\'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
def snake_case_ ( A_ : Namespace ):
    '''Factory registered with the CLI sub-parser: build the convert command
    from parsed arguments.

    NOTE(review): as written this raises NameError — ``ConvertCommand`` is not
    defined in this file (the command class below is named ``__snake_case``),
    and the body reads ``args`` while the parameter is named ``A_``. The names
    look mechanically mangled; reconcile with the upstream source.
    '''
    return ConvertCommand(args.tfds_path, args.datasets_directory )
class __snake_case ( _lowercase):
    """CLI command that rewrites a TensorFlow Datasets script into a
    HuggingFace Datasets script by line-by-line regex substitution.

    NOTE(review): local names throughout this class look mechanically mangled —
    values are bound to ``_lowerCamelCase`` but read back under their original
    names (``train_parser``, ``abs_tfds_path``, ``file_names``, ``out_line``,
    ...). Reconcile with the upstream source before executing.
    """
    @staticmethod
    def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : ArgumentParser ):
        """Register the ``convert`` sub-command and its two path arguments.

        NOTE(review): the sub-parser is bound to ``_lowerCamelCase`` but the
        following lines use ``train_parser`` — mangled naming.
        """
        _lowerCamelCase : List[str] = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=__lowerCAmelCase )
    def __init__( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , *__lowerCAmelCase : int ):
        """Store the source (tfds) path and the destination datasets directory.

        NOTE(review): the body reads ``tfds_path``/``datasets_directory`` while
        both positional parameters are named ``__lowerCAmelCase`` — mangled.
        """
        _lowerCamelCase : List[str] = get_logger('''datasets-cli/converting''' )
        _lowerCamelCase : int = tfds_path
        _lowerCamelCase : Dict = datasets_directory
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Convert every eligible ``.py`` file under the tfds path.

        Per file: read lines, drop/replace TF-specific imports, apply the
        ``TO_CONVERT`` regex table, wrap lines that mention ``TO_HIGHLIGHT``
        identifiers in a review banner, then write builder scripts into
        per-dataset directories and copy shared utility files next to the
        builders that import them.
        """
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : Union[str, Any] = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            _lowerCamelCase : Dict = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        _lowerCamelCase : int = os.path.abspath(self._datasets_directory )
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        # utils_files / with_manual_update / imports_to_builder_map accumulators.
        _lowerCamelCase : str = []
        _lowerCamelCase : Union[str, Any] = []
        _lowerCamelCase : Union[str, Any] = {}
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : List[str] = os.listdir(__lowerCAmelCase )
        else:
            _lowerCamelCase : Optional[Any] = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''' )
            _lowerCamelCase : Union[str, Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Optional[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            # Skip package files, tests and anything that is not a .py script.
            if not os.path.isfile(__lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(__lowerCAmelCase , encoding='''utf-8''' ) as f:
                _lowerCamelCase : Tuple = f.readlines()
            _lowerCamelCase : Optional[int] = []
            _lowerCamelCase : Union[str, Any] = False
            _lowerCamelCase : int = False
            _lowerCamelCase : Tuple = []
            for line in lines:
                _lowerCamelCase : Optional[int] = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    _lowerCamelCase : Union[str, Any] = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    _lowerCamelCase : List[str] = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    _lowerCamelCase : str = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    _lowerCamelCase : Union[str, Any] = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # NOTE(review): the lambda parameter is ``__lowerCAmelCase`` but the
                    # predicate tests ``e`` — mangled; likely ``lambda e: e in out_line``
                    # filtered over TO_HIGHLIGHT.
                    _lowerCamelCase : Dict = True
                    _lowerCamelCase : Optional[int] = list(filter(lambda __lowerCAmelCase : e in out_line , __lowerCAmelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCAmelCase ) + '''\n''' )
                    out_lines.append(__lowerCAmelCase )
                    out_lines.append(__lowerCAmelCase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        _lowerCamelCase : str = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    _lowerCamelCase : Dict = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , __lowerCAmelCase )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    _lowerCamelCase : Union[str, Any] = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    _lowerCamelCase : Any = True
                out_lines.append(__lowerCAmelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                _lowerCamelCase : Union[str, Any] = f_name.replace('''.py''' , '''''' )
                _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
                self._logger.info(f'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(__lowerCAmelCase )
            if needs_manual_update:
                with_manual_update.append(__lowerCAmelCase )
            with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(__lowerCAmelCase )
            self._logger.info(f'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                # Copy each shared utility next to the builder that imports it.
                _lowerCamelCase : Optional[int] = os.path.basename(__lowerCAmelCase )
                _lowerCamelCase : Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(__lowerCAmelCase , __lowerCAmelCase )
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 83 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class snake_case_ :
    '''Configuration-style value holder with a deep-copying clone method.

    NOTE(review): every class attribute below shares the single name
    ``__UpperCamelCase`` (each assignment overwrites the previous one) and
    none carries a type annotation, so ``@dataclass`` sees no fields at all.
    This looks like mechanical renaming damage — the original presumably
    declared distinct annotated fields. Confirm against the upstream source.
    '''
    __UpperCamelCase = None
    __UpperCamelCase = False
    __UpperCamelCase = False
    __UpperCamelCase = False
    __UpperCamelCase = None
    __UpperCamelCase = None
    __UpperCamelCase = False
    __UpperCamelCase = False
    __UpperCamelCase = False
    __UpperCamelCase = True
    __UpperCamelCase = None
    __UpperCamelCase = 1
    __UpperCamelCase = None
    __UpperCamelCase = False
    __UpperCamelCase = None
    __UpperCamelCase = None
    def __UpperCAmelCase ( self ) -> str:
        # Clone: rebuild the same class from a deep copy of each instance attribute.
        # NOTE(review): ``__lowerCAmelCase`` here is undefined (and would be
        # name-mangled inside the class); the original was almost certainly
        # ``copy.deepcopy(v)``.
        return self.__class__(**{k: copy.deepcopy(__lowerCAmelCase ) for k, v in self.__dict__.items()} )
| 625 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Solve 0/1 knapsack by plain recursion.

    Args:
        weights: item weights.
        values: item values (parallel to ``weights``).
        number_of_items: number of usable items (recursion stop).
        max_weight: remaining capacity.
        index: current item under consideration.

    Returns:
        The best total value achievable from items ``index..number_of_items-1``
        within ``max_weight``.

    Note: the original definition named all five parameters ``A_`` (a
    SyntaxError) and recursed on ``knapsack`` while being named ``snake_case_``;
    both are fixed here.
    """
    # No items left to consider.
    if index == number_of_items:
        return 0
    # Option 1: skip item `index`.
    best_without = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take it, if it still fits.
    best_with = 0
    if weights[index] <= max_weight:
        best_with = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(best_without, best_with)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 83 | 0 |
# Hexadecimal digit lookup table: decimal value (0-15) -> hex digit character.
# (Previously bound to a mangled name while the conversion function below
# indexes `values`, which raised NameError.)
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}
def a_(decimal: float) -> str:
    """Convert a whole number (int, or float with zero fractional part) to a
    ``0x``-prefixed lowercase hexadecimal string.

    >>> a_(255)
    '0xff'
    >>> a_(-42)
    '-0x2a'
    >>> a_(0)
    '0x0'

    Raises:
        AssertionError: if the input is not numeric or has a fractional part.
    """
    # Reject non-numeric input and floats with a fractional part.
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    # Peel off base-16 digits from least to most significant.
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = "0123456789abcdef"[remainder] + hexadecimal
    # Edge case: the loop never runs for zero, so supply the "0" digit
    # explicitly (matches the built-in hex(0) == "0x0").
    hexadecimal = "0x" + (hexadecimal or "0")
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 464 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_lowercase):
    """Import-time stand-in used when ``transformers``, ``torch`` or
    ``note_seq`` is not installed.

    Every way of obtaining an instance funnels into ``requires_backends``,
    which raises with an installation hint for the missing packages.
    """
    snake_case__ : Optional[Any] = ["transformers", "torch", "note_seq"]
    def __init__( self : Union[str, Any] , *_args , **_kwargs ):
        """Refuse construction unless all required backends are importable."""
        requires_backends(self , ["transformers", "torch", "note_seq"])
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : List[Any] , *_args , **_kwargs ):
        """Refuse this classmethod entry point unless backends are importable."""
        requires_backends(cls , ["transformers", "torch", "note_seq"])
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : int , *_args , **_kwargs ):
        """Same guard; this duplicate name shadows the definition above."""
        requires_backends(cls , ["transformers", "torch", "note_seq"])
| 83 | 0 |
"""Lazy import scaffolding for the Jukebox sub-package.

Previously the export dict, the torch-only modeling list, and the
``_LazyModule`` instance were all bound to one mangled name (``__A``) while
the ``_LazyModule`` call read ``_import_structure``, which was never defined;
this restores the standard pattern.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule -> exported public names; consumed by _LazyModule below.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

# The modeling module is only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase):
    """Slow integration test: the Flax mT5-small cross-entropy score on a
    fixed sentence pair must match a reference value.

    (The original body bound every local to one mangled name while reading
    ``tokenizer``/``labels``/``logits``/``loss``/``mtf_score``/``EXPECTED_SCORE``,
    which raised NameError; coherent local names are restored here.)
    """
    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Compare the Flax mT5 score against the reference EXPECTED_SCORE."""
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        # Teacher-forced decoder inputs: labels shifted right.
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        # Score convention: negative total (per-token mean * sequence length).
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 83 | 0 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCAmelCase ( _lowercase):
    """Dataset reader that builds a Dataset (or streaming dataset) from JSON
    files via the packaged ``Json`` builder.

    NOTE(review): local names look mechanically mangled — the builder calls
    pass ``__lowerCAmelCase``, which is undefined here (parameters are named
    ``UpperCamelCase__``). Reconcile with the upstream source before use.
    """
    def __init__( self : List[Any] , UpperCamelCase__ : NestedDataStructureLike[PathLike] , UpperCamelCase__ : Optional[NamedSplit] = None , UpperCamelCase__ : Optional[Features] = None , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : Optional[Any] , ) -> List[str]:
        # Forward shared reader options to the abstract base, then construct
        # the Json builder over the given files/features/field.
        super().__init__(
            __lowerCAmelCase , split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , num_proc=__lowerCAmelCase , **__lowerCAmelCase , )
        _UpperCamelCase =field
        # Normalize a bare path (or list of paths) into a {split: paths} mapping.
        _UpperCamelCase =path_or_paths if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else {self.split: path_or_paths}
        _UpperCamelCase =Json(
            cache_dir=__lowerCAmelCase , data_files=__lowerCAmelCase , features=__lowerCAmelCase , field=__lowerCAmelCase , **__lowerCAmelCase , )
    def UpperCamelCase__ ( self : int ) -> Any:
        """Materialize the dataset: a streaming view when requested, otherwise
        download/prepare then load the requested split."""
        if self.streaming:
            _UpperCamelCase =self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            _UpperCamelCase =None
            _UpperCamelCase =None
            _UpperCamelCase =None
            _UpperCamelCase =None
            self.builder.download_and_prepare(
                download_config=__lowerCAmelCase , download_mode=__lowerCAmelCase , verification_mode=__lowerCAmelCase , base_path=__lowerCAmelCase , num_proc=self.num_proc , )
            _UpperCamelCase =self.builder.as_dataset(
                split=self.split , verification_mode=__lowerCAmelCase , in_memory=self.keep_in_memory )
        return dataset
class UpperCAmelCase :
    """Serialize a Dataset to JSON (JSON-lines by default), optionally
    compressed and optionally batched across worker processes.

    NOTE(review): names look mechanically mangled — all three non-init methods
    share the name ``UpperCamelCase__`` (later definitions shadow earlier
    ones), ``_batch_json``-style tuple unpacks collapsed to a single name, and
    several reads (``num_proc``, ``dataset``, ``num_rows``...) have no matching
    binding. Reconcile with the upstream source before executing.
    """
    def __init__( self : Optional[int] , UpperCamelCase__ : Dataset , UpperCamelCase__ : Union[PathLike, BinaryIO] , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : Dict , ) -> Union[str, Any]:
        # num_proc must be a positive integer when supplied.
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
        _UpperCamelCase =dataset
        _UpperCamelCase =path_or_buf
        _UpperCamelCase =batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        _UpperCamelCase =num_proc
        _UpperCamelCase ='''utf-8'''
        _UpperCamelCase =to_json_kwargs
    def UpperCamelCase__ ( self : List[str] ) -> Union[str, Any]:
        """Write the dataset and return the number of bytes written.

        Defaults: ``orient='records'`` with one JSON object per line; ``index``
        only for orients that support it; compression is allowed for file
        paths but not for already-open buffers."""
        _UpperCamelCase =self.to_json_kwargs.pop('''path_or_buf''' , __lowerCAmelCase )
        _UpperCamelCase =self.to_json_kwargs.pop('''orient''' , '''records''' )
        _UpperCamelCase =self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
        _UpperCamelCase =self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
        _UpperCamelCase =self.to_json_kwargs.pop('''compression''' , __lowerCAmelCase )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , '''wb''' , compression=__lowerCAmelCase ) as buffer:
                _UpperCamelCase =self._write(file_obj=__lowerCAmelCase , orient=__lowerCAmelCase , lines=__lowerCAmelCase , index=__lowerCAmelCase , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    ''' was passed. Please provide a local path instead.''' )
            _UpperCamelCase =self._write(
                file_obj=self.path_or_buf , orient=__lowerCAmelCase , lines=__lowerCAmelCase , index=__lowerCAmelCase , **self.to_json_kwargs )
        return written
    def UpperCamelCase__ ( self : Dict , UpperCamelCase__ : Optional[int] ) -> int:
        """Render one batch of rows to encoded JSON bytes (worker payload).

        NOTE(review): the first line should unpack the args tuple —
        ``offset, orient, lines, index, to_json_kwargs = args`` — but the
        unpack collapsed to a single mangled name."""
        _UpperCamelCase =args
        _UpperCamelCase =query_table(
            table=self.dataset.data , key=slice(__lowerCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
        _UpperCamelCase =batch.to_pandas().to_json(
            path_or_buf=__lowerCAmelCase , orient=__lowerCAmelCase , lines=__lowerCAmelCase , index=__lowerCAmelCase , **__lowerCAmelCase )
        # JSON-lines output must end each record with a newline.
        if not json_str.endswith('''\n''' ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def UpperCamelCase__ ( self : List[str] , UpperCamelCase__ : BinaryIO , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[Any] , ) -> Dict:
        """Stream batches to ``file_obj``, serially or via a process pool;
        returns the total byte count."""
        _UpperCamelCase =0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                _UpperCamelCase =self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(__lowerCAmelCase )
        else:
            # NOTE(review): this should be the two-name unpack
            # ``num_rows, batch_size = len(self.dataset), self.batch_size`` —
            # as written the later reads of num_rows/batch_size raise NameError.
            _UpperCamelCase =len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __lowerCAmelCase , __lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                    written += file_obj.write(__lowerCAmelCase )
        return written
| 404 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    '''files''', [
        ['''full:README.md''', '''dataset_infos.json'''],
        ['''empty:README.md''', '''dataset_infos.json'''],
        ['''dataset_infos.json'''],
        ['''full:README.md'''],
    ], )
def snake_case_ ( A_ : Dict, A_ : List[str] ):
    '''DatasetInfosDict.from_directory must pick up dataset_size from README
    YAML metadata and/or the legacy dataset_infos.json file.

    NOTE(review): both parameters are named ``A_`` (a SyntaxError as written)
    and the body reads ``tmp_path_factory``/``files``/``dataset_infos_dir`` —
    names look mechanically mangled; restore from upstream.
    '''
    _lowerCamelCase : int = tmp_path_factory.mktemp('''dset_infos_dir''' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
            f.write('''---\ndataset_info:\n  dataset_size: 42\n---''' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
            f.write('''''' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''', '''w''' ) as f:
            f.write('''{"default": {"dataset_size": 42}}''' )
    _lowerCamelCase : str = DatasetInfosDict.from_directory(A_ )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    '''dataset_info''', [
        DatasetInfo(),
        DatasetInfo(
            description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ),
    ], )
def snake_case_ ( A_ : str, A_ : DatasetInfo ):
    '''Round-trip: write_to_directory then from_directory must reproduce the
    DatasetInfo and create dataset_info.json.

    NOTE(review): duplicate ``A_`` parameters (SyntaxError as written) and
    mangled locals, as in the sibling tests in this file.
    '''
    _lowerCamelCase : Optional[Any] = str(A_ )
    dataset_info.write_to_directory(A_ )
    _lowerCamelCase : str = DatasetInfo.from_directory(A_ )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(A_, '''dataset_info.json''' ) )
def snake_case_ ( ):
    '''A fully-populated DatasetInfo must serialize to a YAML dict that
    contains exactly the _INCLUDED_INFO_IN_YAML keys and survive a
    yaml dump/load round trip.

    NOTE(review): locals are bound to ``_lowerCamelCase`` but read back as
    ``dataset_info``/``dataset_info_yaml_dict``/``reloaded`` — mangled names.
    '''
    _lowerCamelCase : Union[str, Any] = DatasetInfo(
        description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, )
    _lowerCamelCase : Optional[Any] = dataset_info._to_yaml_dict()
    assert sorted(A_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
    _lowerCamelCase : str = yaml.safe_dump(A_ )
    _lowerCamelCase : Tuple = yaml.safe_load(A_ )
    assert dataset_info_yaml_dict == reloaded
def snake_case_ ( ):
    '''An empty DatasetInfo must serialize to an empty YAML dict.

    NOTE(review): mangled local names — the YAML dict is bound to
    ``_lowerCamelCase`` but read as ``dataset_info_yaml_dict``.
    '''
    _lowerCamelCase : int = DatasetInfo()
    _lowerCamelCase : Dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    '''dataset_infos_dict''', [
        DatasetInfosDict(),
        DatasetInfosDict({'''default''': DatasetInfo()} ),
        DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
        DatasetInfosDict(
            {
                '''default''': DatasetInfo(
                    description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, )
            } ),
        DatasetInfosDict(
            {
                '''v1''': DatasetInfo(dataset_size=42 ),
                '''v2''': DatasetInfo(dataset_size=13_37 ),
            } ),
    ], )
def snake_case_ ( A_ : Optional[Any], A_ : DatasetInfosDict ):
    '''Round-trip a DatasetInfosDict through a directory: only the
    YAML-representable fields are expected to survive, and each entry keeps
    its config_name. Creates README.md when the dict is non-empty.

    NOTE(review): duplicate ``A_`` parameters (SyntaxError as written) and
    mangled locals, as in the sibling tests in this file.
    '''
    _lowerCamelCase : List[str] = str(A_ )
    dataset_infos_dict.write_to_directory(A_ )
    _lowerCamelCase : List[Any] = DatasetInfosDict.from_directory(A_ )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        _lowerCamelCase : str = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        _lowerCamelCase : Any = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(A_, '''README.md''' ) )
| 83 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase__ = False  # module-level flag; not read anywhere in the code shown below
class __magic_name__ (unittest.TestCase ):
    # Intentionally empty: no fast (non-nightly) tests are defined for this
    # pipeline in this file.
    pass
@nightly
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
    """Nightly GPU tests for VersatileDiffusion text-to-image.

    NOTE(review): locals throughout are bound to ``lowerCAmelCase_`` but read
    back under their original names (``pipe``, ``generator``, ``image``,
    ``new_image``, ``image_slice``, ``expected_slice``) — mechanically mangled;
    reconcile with upstream before running.
    """
    def __a ( self ) -> Dict:
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __a ( self ) -> Optional[Any]:
        """Save/reload determinism: the pipeline must produce identical images
        before and after a save_pretrained/from_pretrained round trip."""
        lowerCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        lowerCAmelCase_ = '''A painting of a squirrel eating a burger '''
        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__lowerCAmelCase )
            lowerCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCAmelCase )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        lowerCAmelCase_ = generator.manual_seed(0 )
        lowerCAmelCase_ = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def __a ( self ) -> List[str]:
        """Reference-image check: a 50-step fp16 run must reproduce a known
        pixel slice within tolerance."""
        lowerCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        lowerCAmelCase_ = '''A painting of a squirrel eating a burger '''
        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        lowerCAmelCase_ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCAmelCase_ = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 122 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __snake_case :
    """Test harness that builds small ASTConfig/model inputs for the AST
    (Audio Spectrogram Transformer) model tests below."""
    def __init__( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple=1_3 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : List[str]=2_4 , __lowerCAmelCase : str=1_6 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Optional[Any]=3_2 , __lowerCAmelCase : List[Any]=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : int=3_7 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : int=1_0 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : str=None , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Union[str, Any]=2 , ):
        """Store the small-model hyperparameters and derive the patch-grid
        sequence length.

        NOTE(review): attributes are assigned from names (``parent``,
        ``batch_size``, ...) that do not match the mangled parameter names
        (all ``__lowerCAmelCase``); reconcile with the upstream source.
        """
        _lowerCamelCase : List[str] = parent
        _lowerCamelCase : str = batch_size
        _lowerCamelCase : Tuple = patch_size
        _lowerCamelCase : Optional[int] = max_length
        _lowerCamelCase : List[Any] = num_mel_bins
        _lowerCamelCase : int = is_training
        _lowerCamelCase : Union[str, Any] = use_labels
        _lowerCamelCase : Dict = hidden_size
        _lowerCamelCase : Tuple = num_hidden_layers
        _lowerCamelCase : int = num_attention_heads
        _lowerCamelCase : Tuple = intermediate_size
        _lowerCamelCase : List[str] = hidden_act
        _lowerCamelCase : Dict = hidden_dropout_prob
        _lowerCamelCase : int = attention_probs_dropout_prob
        _lowerCamelCase : List[Any] = type_sequence_label_size
        _lowerCamelCase : Tuple = initializer_range
        _lowerCamelCase : List[str] = scope
        _lowerCamelCase : Optional[int] = frequency_stride
        _lowerCamelCase : List[Any] = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        _lowerCamelCase : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        _lowerCamelCase : Union[str, Any] = (self.max_length - self.patch_size) // self.time_stride + 1
        _lowerCamelCase : Any = frequency_out_dimension * time_out_dimension
        _lowerCamelCase : List[Any] = num_patches + 2
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Build (config, input_values, labels) for one small forward pass;
        labels are only produced when use_labels is set."""
        _lowerCamelCase : int = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        _lowerCamelCase : str = None
        if self.use_labels:
            _lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        _lowerCamelCase : Optional[int] = self.get_config()
        return config, input_values, labels
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Return an ASTConfig built from the stored hyperparameters."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
        """Run ASTModel in eval mode and check the last_hidden_state shape."""
        _lowerCamelCase : List[Any] = ASTModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : List[Any] = model(__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Adapt (config, input_values, labels) into the common test format.

        NOTE(review): the multi-line unpack below binds ``_lowerCamelCase``
        three times instead of unpacking into three distinct names, so the
        subsequent read of ``input_values`` raises NameError — mangled; was
        likely ``(config, input_values, labels) = config_and_inputs``.
        """
        _lowerCamelCase : int = self.prepare_config_and_inputs()
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : Optional[Any] = config_and_inputs
        _lowerCamelCase : int = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    """Common-suite tests for Audio Spectrogram Transformer (AST) models.

    NOTE(review): attribute names below (all_model_classes, pipeline_model_mapping, ...)
    are the ones ModelTesterMixin/PipelineTesterMixin read — they must not be renamed.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # The audio-classification pipeline has its own dedicated test suite.
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        # AST has no text modality, hence has_text_modality=False.
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    """Download the sample FLAC clip from the Hub and load it with torchaudio.

    Returns:
        (audio, sampling_rate) as produced by ``torchaudio.load``.
    """
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase):
    """Slow integration test against the AudioSet-finetuned AST checkpoint."""

    @cached_property
    def default_feature_extractor(self):
        # Only available when torchaudio is installed.
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''')
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits: AudioSet has 527 classes.
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 83 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq OPT state dict and normalize it to HF naming.

    Args:
        checkpoint_path: path to a ``torch.save``d checkpoint; if it contains a
            top-level ``"model"`` key, that inner dict is used.

    Returns:
        The cleaned state dict with renamed keys and the fused qkv projection
        split into separate q/k/v projections.
    """
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        # Unwrap instead of re-reading the file from disk a second time.
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert an OPT fairseq checkpoint into a saved HF ``OPTModel``.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        config: optional path/name of an ``OPTConfig`` to use; defaults to a
            fresh ``OPTConfig()``.
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    # The reference checkpoints are fp16; keep the converted model in half precision.
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point: convert a metaseq OPT checkpoint to HF format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--fairseq_path""",
        type=str,
        help=(
            """path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
            """ https://huggingface.co/models?other=opt_metasq"""
        ),
    )
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 259 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Run simulated annealing starting from ``search_prob``.

    The walk accepts improving neighbors always and worsening neighbors with
    probability e^(change / temperature); the temperature decays geometrically
    until it drops below ``threshold_temp`` or no neighbor can be chosen.

    Returns:
        The state with the highest observed score (note: tracked as a maximum
        regardless of ``find_max``, matching the original behavior).
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_temp)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('''Iterations''')
        plt.ylabel('''Function values''')
        plt.show()
    return best_state
if __name__ == "__main__":

    def test_fa(x, y):
        # Convex bowl: minimum at the origin.
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    def test_fa(x, y):
        # Second objective: quadratic in x, linear in y.
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F"""{local_min.score()}"""
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F"""{local_min.score()}"""
    )
| 83 | 0 |
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( _lowercase ):
    """Tool that summarizes an English text with a BART summarization checkpoint.

    NOTE(review): PipelineTool reads the class attributes and calls
    encode/forward/decode by name — these identifiers must not be renamed.
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        """Tokenize the input text, truncating to the model's maximum length."""
        return self.pre_processor(text, return_tensors='pt', truncation=True)

    def forward(self, inputs):
        """Generate the summary token ids for the encoded input."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Decode generated token ids back into a clean summary string."""
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 439 |
"""simple docstring"""
from collections import namedtuple
lowerCAmelCase__ = namedtuple('''from_to''', '''from_ to''')
lowerCAmelCase__ = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_0_1, 1000),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7),
'''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def snake_case_ ( A_ : float, A_ : str, A_ : str ):
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ''', '''.join(A_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ''', '''.join(A_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( _lowercase , unittest.TestCase ):
    '''simple docstring'''

    # TokenizerTesterMixin reads these class attributes by name.
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the temp dir for the tests."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 340 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( _lowercase):
    """Builds tiny DebertaV2 configs/inputs and checks every model head's outputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random token ids, optional mask/type ids/labels, plus a tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        # The loss must be a scalar tensor.
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three call signatures.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice: (batch, num_choices, seq_length).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test mixin: drop labels, keep model inputs."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    """Common-suite tests for DebertaV2 models.

    NOTE(review): attribute names (all_model_classes, pipeline_model_mapping, ...)
    are read by ModelTesterMixin/PipelineTesterMixin and must not be renamed.
    """

    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase):
    """Slow integration tests against the released deberta-v2-xlarge checkpoint."""

    @unittest.skip(reason='''Model not available yet''')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''')

        input_ids = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4), f'''{output[:, 1:4, 1:4]}''')
| 83 | 0 |
# Lazy-import module layout for MaskFormer (standard transformers __init__ pattern).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
    '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision-only members are added to the structure, not assigned over it.
    _import_structure['''feature_extraction_maskformer'''] = ['''MaskFormerFeatureExtractor''']
    _import_structure['''image_processing_maskformer'''] = ['''MaskFormerImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_maskformer'''] = [
        '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MaskFormerForInstanceSegmentation''',
        '''MaskFormerModel''',
        '''MaskFormerPreTrainedModel''',
    ]
    _import_structure['''modeling_maskformer_swin'''] = [
        '''MaskFormerSwinBackbone''',
        '''MaskFormerSwinModel''',
        '''MaskFormerSwinPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 9 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# NOTE(review): all three constants below are bound to the same machine-scrambled
# name, so each assignment shadows the previous one; in the original they were
# presumably distinct (repo id, cache dir, commit hash) — confirm before running.
# Hub repo id exercised by the caching tests below.
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert'''
# Expected local cache directory for that repo (models--<org>--<name> layout).
lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
# Full commit hash of the revision the tests pin to.
lowerCAmelCase__ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class __snake_case ( unittest.TestCase):
    # NOTE(review): identifiers in this class are machine-scrambled — every method
    # shares the name SCREAMING_SNAKE_CASE (later defs shadow earlier ones), results
    # are bound to a throwaway `_lowerCamelCase` local, and `__lowerCAmelCase` is an
    # undefined catch-all argument. The intent (tests for `cached_file`, `has_file`
    # and `get_file_from_repo`) is documented per method; the original identifiers
    # must be restored before this can run.
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Download a file from the Hub, then verify the cache layout and cache reuse."""
        _lowerCamelCase : Optional[Any] = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(__lowerCAmelCase ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : Optional[int] = f.read()
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
        self.assertTrue(os.path.isfile(__lowerCAmelCase ) )
        # File is cached at the same place the second time.
        _lowerCamelCase : Tuple = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        # Using a specific revision to test the full commit hash.
        _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''9b8c223''' )
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Invalid repo ids, git revisions and filenames must raise descriptive errors."""
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            _lowerCamelCase : Optional[int] = cached_file('''tiny-random-bert''' , __lowerCAmelCase )
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            _lowerCamelCase : str = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''aaaa''' )
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : int = cached_file(__lowerCAmelCase , '''conf''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Missing files are recorded under `.no_exist`, and non-raising modes return None."""
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , '''conf''' )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : List[Any] = f.read()
        self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''.no_exist''' , __lowerCAmelCase , '''conf''' ) ) )
        _lowerCamelCase : str = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = cached_file(__lowerCAmelCase , '''conf''' , local_files_only=__lowerCAmelCase , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        _lowerCamelCase : Any = mock.Mock()
        _lowerCamelCase : Optional[Any] = 5_0_0
        _lowerCamelCase : Dict = {}
        _lowerCamelCase : List[Any] = HTTPError
        _lowerCamelCase : int = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=__lowerCAmelCase ) as mock_head:
            _lowerCamelCase : Union[str, Any] = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=__lowerCAmelCase )
            self.assertIsNone(__lowerCAmelCase )
            # This check we did call the fake head request
            mock_head.assert_called()
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """`has_file` is True only for weight files actually present on the repo."""
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """`get_file_from_repo` returns None for missing files, raises for bad repos/revisions."""
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , __lowerCAmelCase )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase , revision='''ahaha''' )
        _lowerCamelCase : Dict = get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        _lowerCamelCase : Dict = json.loads(open(__lowerCAmelCase , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 7_6_8 )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """A local directory is searched directly, without hitting the Hub."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCamelCase : Any = Path(__lowerCAmelCase ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(__lowerCAmelCase , '''a.txt''' ) , str(__lowerCAmelCase ) )
            self.assertIsNone(get_file_from_repo(__lowerCAmelCase , '''b.txt''' ) )
| 83 | 0 |
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

# Registry of formatter classes by canonical format-type name (None = plain python objects).
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
# Maps every accepted alias (including the canonical name itself) to the canonical name.
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
# Maps aliases of formatters whose backend is missing to the error to raise on use.
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: Type[Formatter],
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    """Register a Formatter class under *format_type* and all of its *aliases*."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    """Record that *format_type* (and aliases) is unusable, keeping the error to raise on use."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias ('pt', 'np', ...) to its canonical format-type name."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for *format_type* (alias-aware).

    Raises the recorded backend error when the format exists but its optional
    dependency is missing, and ValueError for unknown format types.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
| 33 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Most arguments are per-stage lists of length 3 (CvT has three stages).
    The list defaults are kept to match the upstream signature; they are never
    mutated, only stored.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,                                   # input image channels
        patch_sizes=[7, 3, 3],                            # conv-embedding kernel per stage
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],                         # hidden size per stage
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],                                 # transformer blocks per stage
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],                   # only the last stage uses a CLS token
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 83 | 0 |
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return the binomial coefficient C(n, k): ways to choose k items from n.

    Raises:
        ValueError: if k is negative or greater than n.
    """
    if n < k or k < 0:
        raise ValueError('Please enter positive integers for n and k where n >= k')
    # C(n, k) = n! / (k! * (n - k)!); integer division is exact here.
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f'''fifty-two card deck is: {combinations(52, 5)}\n''',
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f'''4 for group projects, there are {combinations(40, 4)} ways''',
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f'''are {combinations(10, 3)} ways that first, second and''',
        "third place can be awarded.",
    )
| 345 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __snake_case ( _lowercase):
    # Output container for one SDE-VE scheduler step: the stepped sample plus its
    # pre-noise mean. NOTE(review): identifiers are machine-scrambled — both fields
    # share one name so the second annotation shadows the first; the originals were
    # presumably `prev_sample` and `prev_sample_mean` (cf. the `SdeVeOutput(...)`
    # constructor call further down) — confirm before running.
    snake_case__ : torch.FloatTensor
    snake_case__ : torch.FloatTensor
class __snake_case ( _lowercase , _lowercase):
    # NOTE(review): identifiers in this class are machine-scrambled — every method is
    # named SCREAMING_SNAKE_CASE, arguments collapse to `__lowerCAmelCase`, and
    # results are bound to a throwaway `_lowerCamelCase` local instead of real names.
    # Structurally this mirrors a variance-exploding (VE) SDE predictor-corrector
    # scheduler (Song et al., see the score_sde_pytorch link in the file header);
    # the original identifiers must be restored before this can run.
    snake_case__ : int = 1
    @register_to_config
    def __init__( self : str , __lowerCAmelCase : int = 2_0_0_0 , __lowerCAmelCase : float = 0.15 , __lowerCAmelCase : float = 0.01 , __lowerCAmelCase : float = 13_48.0 , __lowerCAmelCase : float = 1E-5 , __lowerCAmelCase : int = 1 , ):
        """Record sigma_max and initialize the sigma/timestep schedule via set_sigmas."""
        _lowerCamelCase : Optional[int] = sigma_max
        # setable values
        _lowerCamelCase : Dict = None
        self.set_sigmas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ):
        """Identity transform: the VE-SDE formulation requires no model-input scaling."""
        return sample
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : Union[str, torch.device] = None ):
        """Build the continuous timestep grid with torch.linspace, starting at 1."""
        _lowerCamelCase : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        _lowerCamelCase : Optional[int] = torch.linspace(1 , __lowerCAmelCase , __lowerCAmelCase , device=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None ):
        """Precompute the geometric sigma schedule and the per-timestep discrete sigmas."""
        _lowerCamelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
        _lowerCamelCase : int = sigma_max if sigma_max is not None else self.config.sigma_max
        _lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase )
        # sigma(t) = sigma_min * (sigma_max / sigma_min) ** t  (geometric interpolation)
        _lowerCamelCase : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        _lowerCamelCase : Optional[int] = torch.exp(torch.linspace(math.log(__lowerCAmelCase ) , math.log(__lowerCAmelCase ) , __lowerCAmelCase ) )
        _lowerCamelCase : Tuple = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ):
        """Return the sigma of the previous timestep (zero where the timestep is 0)."""
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
        """Predictor step: one reverse-SDE Euler-Maruyama update (eqs. 6/47 of the paper)."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        _lowerCamelCase : Tuple = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        _lowerCamelCase : Dict = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        _lowerCamelCase : Optional[int] = timesteps.to(self.discrete_sigmas.device )
        _lowerCamelCase : Any = self.discrete_sigmas[timesteps].to(sample.device )
        _lowerCamelCase : int = self.get_adjacent_sigma(__lowerCAmelCase , __lowerCAmelCase ).to(sample.device )
        _lowerCamelCase : Any = torch.zeros_like(__lowerCAmelCase )
        # diffusion coefficient g(t) = sqrt(sigma_t^2 - sigma_{t-1}^2)
        _lowerCamelCase : Any = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        _lowerCamelCase : Union[str, Any] = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            _lowerCamelCase : List[Any] = diffusion.unsqueeze(-1 )
        _lowerCamelCase : int = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        _lowerCamelCase : List[str] = randn_tensor(
            sample.shape , layout=sample.layout , generator=__lowerCAmelCase , device=sample.device , dtype=sample.dtype )
        _lowerCamelCase : List[Any] = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        _lowerCamelCase : int = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=__lowerCAmelCase , prev_sample_mean=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
        """Corrector step: one Langevin-MCMC update with an SNR-scaled step size."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        _lowerCamelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=__lowerCAmelCase ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        _lowerCamelCase : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        _lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        _lowerCamelCase : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        _lowerCamelCase : Tuple = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        _lowerCamelCase : Union[str, Any] = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            _lowerCamelCase : str = step_size.unsqueeze(-1 )
        _lowerCamelCase : Any = sample + step_size * model_output
        _lowerCamelCase : int = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , ):
        """Forward diffusion: add sigma-scaled noise to the original samples at `timesteps`."""
        _lowerCamelCase : Dict = timesteps.to(original_samples.device )
        _lowerCamelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
        _lowerCamelCase : Union[str, Any] = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        _lowerCamelCase : int = noise + original_samples
        return noisy_samples
    def __len__( self : Optional[int] ):
        """Scheduler length == number of training timesteps from the config."""
        return self.config.num_train_timesteps
| 83 | 0 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0/1/2 values with Dijkstra's three-way partition.

    Sorts in place in O(n) time and O(1) extra space and returns the sequence
    (a fresh list is returned for empty or single-element input).

    Raises:
        ValueError: if an element is not one of `colors`.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # 0s move to the front; both pointers advance.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # 2s move to the back; mid is re-examined since the swapped-in value is unseen.
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"""The elements inside the sequence must contains only {colors} values"""
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(f"""{dutch_national_flag_sort(unsorted)}""")
| 625 |
"""simple docstring"""
from torch import nn
def snake_case_ ( A_ : int ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
| 83 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class A ( _lowercase ):
    # NOTE(review): identifiers here are machine-scrambled — every method is named
    # `lowerCamelCase` (later defs shadow earlier ones) and `__lowerCAmelCase` is an
    # undefined catch-all argument. The intent (unit tests for RealmRetriever over a
    # tiny WordPiece vocab and dummy block records) is documented per method; the
    # original identifiers must be restored before this can run.
    def lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
        """Create a temp dir holding a minimal vocab ('realm_tokenizer') and an empty 'realm_block_records' dir."""
        _lowerCamelCase : Tuple =tempfile.mkdtemp()
        _lowerCamelCase : List[Any] =5
        # Realm tok
        _lowerCamelCase : Union[str, Any] =[
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        _lowerCamelCase : Tuple =os.path.join(self.tmpdirname , 'realm_tokenizer' )
        os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] =os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        _lowerCamelCase : List[Any] =os.path.join(self.tmpdirname , 'realm_block_records' )
        os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
    def lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
        """Load a RealmTokenizer from the temp vocab written in setUp."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
    def lowerCamelCase ( self : Dict ) -> Union[str, Any]:
        """Remove the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname )
    def lowerCamelCase ( self : Optional[Any] ) -> List[str]:
        """Return a RealmConfig whose num_block_records matches the dummy records below."""
        _lowerCamelCase : Dict =RealmConfig(num_block_records=self.num_block_records )
        return config
    def lowerCamelCase ( self : List[str] ) -> Union[str, Any]:
        """Return a tiny two-row question/answer Dataset fixture."""
        _lowerCamelCase : Optional[Any] =Dataset.from_dict(
            {
                'id': ['0', '1'],
                'question': ['foo', 'bar'],
                'answers': [['Foo', 'Bar'], ['Bar']],
            } )
        return dataset
    def lowerCamelCase ( self : List[str] ) -> str:
        """Return six byte-string evidence blocks as a numpy array."""
        _lowerCamelCase : Any =np.array(
            [
                b'This is the first record',
                b'This is the second record',
                b'This is the third record',
                b'This is the fourth record',
                b'This is the fifth record',
                b'This is a longer longer longer record',
            ] , dtype=__lowerCAmelCase , )
        return block_records
    def lowerCamelCase ( self : Tuple ) -> Tuple:
        """Build a RealmRetriever over the dummy blocks and the temp tokenizer."""
        _lowerCamelCase : str =RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever
    def lowerCamelCase ( self : List[str] ) -> Any:
        """Retrieve two blocks and check concatenated tensor shapes and decoded tokens."""
        _lowerCamelCase : Dict =self.get_config()
        _lowerCamelCase : Dict =self.get_dummy_retriever()
        _lowerCamelCase : List[Any] =retriever.tokenizer
        _lowerCamelCase : str =np.array([0, 3] , dtype='long' )
        _lowerCamelCase : Optional[int] =tokenizer(['Test question'] ).input_ids
        _lowerCamelCase : str =tokenizer(
            ['the fourth'] , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ).input_ids
        _lowerCamelCase : Dict =config.reader_seq_len
        _lowerCamelCase : Dict =retriever(
            __lowerCAmelCase , __lowerCAmelCase , answer_ids=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors='np' )
        self.assertEqual(len(__lowerCAmelCase ) , 2 )
        self.assertEqual(len(__lowerCAmelCase ) , 2 )
        self.assertEqual(len(__lowerCAmelCase ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
    def lowerCamelCase ( self : List[str] ) -> Optional[Any]:
        """Check has_answers plus start/end answer positions across three retrieved blocks."""
        _lowerCamelCase : int =self.get_config()
        _lowerCamelCase : Dict =self.get_dummy_retriever()
        _lowerCamelCase : List[Any] =retriever.tokenizer
        _lowerCamelCase : Optional[int] =np.array([0, 3, 5] , dtype='long' )
        _lowerCamelCase : Dict =tokenizer(['Test question'] ).input_ids
        _lowerCamelCase : List[Any] =tokenizer(
            ['the fourth', 'longer longer'] , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ).input_ids
        _lowerCamelCase : Optional[Any] =config.reader_seq_len
        _lowerCamelCase : Optional[Any] =retriever(
            __lowerCAmelCase , __lowerCAmelCase , answer_ids=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors='np' )
        self.assertEqual([False, True, True] , __lowerCAmelCase )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __lowerCAmelCase )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __lowerCAmelCase )
    def lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
        """Round-trip block records via save_pretrained, locally and via a mocked hub download."""
        _lowerCamelCase : int =self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
        # Test local path
        _lowerCamelCase : str =retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
        self.assertEqual(retriever.block_records[0] , b'This is the first record' )
        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
            _lowerCamelCase : List[Any] =os.path.join(
                os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
            _lowerCamelCase : List[Any] =RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
            self.assertEqual(retriever.block_records[0] , b'This is the first record' )
| 464 |
"""simple docstring"""
def snake_case_ ( A_ : int, A_ : int ):
'''simple docstring'''
return int(input_a == input_a == 0 )
def snake_case_ ( ):
'''simple docstring'''
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(F'''| 0 | 0 | {nor_gate(0, 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0, 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1, 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1, 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 83 | 0 |
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True when *number* is prime.

    Uses trial division over candidates of the form 6k +/- 1 up to sqrt(number),
    which covers all primes greater than 3.

    Raises:
        AssertionError: for non-int or negative input (the companion test class
            relies on this validation raising).
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class _snake_case ( unittest.TestCase ):
    # NOTE(review): the two methods share one scrambled name (the second shadows the
    # first), and `__lowerCAmelCase` in assertRaises is undefined — it was presumably
    # AssertionError, matching is_prime's input validation; confirm before running.
    def snake_case__ ( self):
        """The first ten primes must all be accepted."""
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))
    def snake_case__ ( self):
        """Negatives raise; 0, 1 and composites are rejected."""
        with self.assertRaises(__lowerCAmelCase):
            is_prime(-19)
        self.assertFalse(
            is_prime(0) , """Zero doesn\'t have any positive factors, primes must have exactly two.""" , )
        self.assertFalse(
            is_prime(1) , """One only has 1 positive factor, primes must have exactly two.""" , )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main() | 407 |
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : list[list[int]] ):
'''simple docstring'''
for i in range(1, len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1, len(A_ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1, len(A_ ) ):
for j in range(1, len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 0 |
"""Lazy-import scaffolding for the PLBart model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule name -> list of public names it exports; consumed by _LazyModule.
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The tokenizer needs sentencepiece, so register it only when available.
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 404 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    """Singly linked node holding one value and an optional successor."""

    def __init__(self, data: T):
        self.data = data  # value stored at this node
        self.next: Node[T] | None = None  # following node, or None at the tail

    def __str__(self) -> str:
        """Render just the stored value."""
        return f"{self.data}"
class Stack(Generic[T]):
    """LIFO stack backed by a singly linked list of Node objects."""

    def __init__(self) -> None:
        # Top of the stack; None when the stack is empty.
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        """Yield values from top to bottom."""
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        """Values joined top-to-bottom, e.g. '3->2->1'."""
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        """Number of stored elements (walks the whole list)."""
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        """True when the stack holds no elements."""
        return self.top is None

    def push(self, item: T) -> None:
        """Place *item* on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top value.

        Raises:
            IndexError: when the stack is empty.
        """
        if self.is_empty():
            raise IndexError('''pop from empty stack''')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the top value without removing it.

        Raises:
            IndexError: when the stack is empty.
        """
        if self.is_empty():
            raise IndexError('''peek from empty stack''')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every element; the nodes become garbage-collectable."""
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 83 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ (_lowercase , unittest.TestCase ):
    # NOTE(review): this block was machine-obfuscated and is broken as written.
    # The base class ``_lowercase`` is undefined (presumably ``TokenizerTesterMixin``,
    # imported above — confirm); the four class attributes below all rebind a single
    # name (presumably ``tokenizer_class``, ``rust_tokenizer_class``,
    # ``test_rust_tokenizer`` and ``from_pretrained_kwargs``); every method is named
    # ``__a`` so each definition shadows the previous one; and locals bound to
    # ``lowerCAmelCase_`` are read back through ``__lowerCAmelCase``, which is
    # undefined. Restore from the upstream RoBERTa tokenizer test before running.
    lowerCamelCase__ = RobertaTokenizer
    lowerCamelCase__ = RobertaTokenizerFast
    lowerCamelCase__ = True
    lowerCamelCase__ = {"cls_token": "<s>"}
    def __a ( self ) -> str:
        """Write a tiny BPE vocab/merges fixture into ``self.tmpdirname`` (presumably ``setUp``)."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCAmelCase_ = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        lowerCAmelCase_ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
        lowerCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        lowerCAmelCase_ = {'''unk_token''': '''<unk>'''}
        lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(__lowerCAmelCase ) )
    def __a ( self , **_a ) -> Optional[int]:
        """Build a slow tokenizer from the fixture dir (presumably ``get_tokenizer``)."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
    def __a ( self , **_a ) -> int:
        """Build a fast tokenizer from the fixture dir (presumably ``get_rust_tokenizer``)."""
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
    def __a ( self , _a ) -> int:
        """Return an (input, expected-output) text pair for round-trip checks."""
        lowerCAmelCase_ = '''lower newer'''
        lowerCAmelCase_ = '''lower newer'''
        return input_text, output_text
    def __a ( self ) -> Tuple:
        """Tokenize against the fixture vocab and check tokens and ids (presumably ``test_full_tokenizer``)."""
        lowerCAmelCase_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowerCAmelCase_ = '''lower newer'''
        lowerCAmelCase_ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        lowerCAmelCase_ = tokenizer.tokenize(__lowerCAmelCase ) # , add_prefix_space=True)
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        lowerCAmelCase_ = tokens + [tokenizer.unk_token]
        lowerCAmelCase_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
    def __a ( self ) -> Dict:
        """Integration check: encode known sentences and compare exact id sequences."""
        lowerCAmelCase_ = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCAmelCase ) , [0, 31414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
    @slow
    def __a ( self ) -> Tuple:
        """Check build_inputs_with_special_tokens matches direct encode (presumably ``test_sequence_builders``)."""
        lowerCAmelCase_ = self.tokenizer_class.from_pretrained("roberta-base" )
        lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.encode(
            "sequence builders" , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def __a ( self ) -> Union[str, Any]:
        """Check prefix-space handling around special tokens (presumably ``test_space_encoding``)."""
        lowerCAmelCase_ = self.get_tokenizer()
        lowerCAmelCase_ = '''Encode this sequence.'''
        lowerCAmelCase_ = tokenizer.byte_encoder[''' '''.encode("utf-8" )[0]]
        # Testing encoder arguments
        lowerCAmelCase_ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        lowerCAmelCase_ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
        # Testing spaces after special tokens
        lowerCAmelCase_ = '''<mask>'''
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )} ) # mask token has a left space
        lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
        lowerCAmelCase_ = '''Encode <mask> sequence'''
        lowerCAmelCase_ = '''Encode <mask>sequence'''
        lowerCAmelCase_ = tokenizer.encode(__lowerCAmelCase )
        lowerCAmelCase_ = encoded.index(__lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.encode(__lowerCAmelCase )
        lowerCAmelCase_ = encoded.index(__lowerCAmelCase )
        lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
    def __a ( self ) -> Optional[Any]:
        # Intentionally skipped in the upstream test (presumably ``test_pretokenized_inputs``).
        pass
    def __a ( self ) -> Any:
        """Compare slow vs fast tokenizer on a sentence with an embedded <mask> token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                lowerCAmelCase_ = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                lowerCAmelCase_ = '''A, <mask> AllenNLP sentence.'''
                lowerCAmelCase_ = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
                lowerCAmelCase_ = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    __lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    __lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
    def __a ( self ) -> str:
        """Check add_prefix_space/trim_offsets survive a fast-tokenizer save/reload."""
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
            lowerCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            lowerCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCAmelCase )
            self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCAmelCase )
            self.assertEqual(post_processor_state["trim_offsets"] , __lowerCAmelCase )
    def __a ( self ) -> List[str]:
        """Exhaustively check offset mappings for each add_prefix_space/trim_offsets combination."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                lowerCAmelCase_ = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
                lowerCAmelCase_ = f"{text_of_1_token} {text_of_1_token}"
                lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                lowerCAmelCase_ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                lowerCAmelCase_ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                lowerCAmelCase_ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                lowerCAmelCase_ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                lowerCAmelCase_ = f" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                lowerCAmelCase_ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ) + 1, 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                lowerCAmelCase_ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                lowerCAmelCase_ = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
| 122 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
# Fix: every module constant below was rebound to a single obfuscated name,
# leaving the names actually referenced here and in the checker function
# (PATH_TO_TRANSFORMERS, CONFIG_MAPPING, _re_checkpoint, ...) undefined.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# Fix: use a raw string so the regex escapes are not (invalid) string escapes.
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes that are deliberately exempt from the checkpoint check.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    """Raise ``ValueError`` listing every config class whose docstring lacks a valid checkpoint link.

    A link ``[name](url)`` found in a config class's source is valid when
    ``url == f"https://huggingface.co/{name}"``. Classes listed in
    ``CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK`` are exempt.

    Fix: the function was renamed by obfuscation (the ``__main__`` guard calls
    ``check_config_docstrings_have_checkpoints``) and its locals were bound to
    throwaway names while being read through the intended ones.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )

    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
    # Entry point for CI: raises (non-zero exit) when any config docstring lacks a checkpoint.
    check_config_docstrings_have_checkpoints()
| 83 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name ):
    """Build a ``SwinConfig`` matching the timm checkpoint named ``swin_name``.

    The timm name encodes the variant, e.g. ``swin_tiny_patch4_window7_224``:
    field 1 is the model size, field 3 ends with the window size, field 4 is the
    image size. ``in22k`` checkpoints use the 21841-class ImageNet-22k head.

    Fixes: locals were bound to throwaway names while read through the intended
    ones, ``int(A_)`` should cast the dict key ``k``, and the computed values
    were never attached to the config object.
    """
    config = SwinConfig()
    name_split = swin_name.split('_' )

    model_size = name_split[1]
    img_size = int(name_split[4] )
    window_size = int(name_split[3][-1] )

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:  # "large"
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    # Human-readable labels hosted on the Hub.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name ):
    """Translate one timm Swin parameter name to its HF ``SwinForImageClassification`` name.

    Backbone tensors are prefixed with ``swin.``; the classification head keeps
    no prefix. Fix: the obfuscated original assigned each replacement to a
    throwaway local, so ``name`` was returned unmodified.
    """
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        # classifier head lives outside the ``swin`` backbone
        name = name.replace('head' , 'classifier' )
    else:
        name = '''swin.''' + name
    return name
def convert_state_dict(orig_state_dict, model ):
    """Re-key a timm Swin state dict in place for ``SwinForImageClassification``.

    Fused ``qkv`` projections are split into separate query/key/value tensors
    (sized via the HF model's ``all_head_size``); relative-position ``mask``
    buffers are dropped; every other tensor is re-keyed through ``rename_key``.

    Fix: the obfuscated original assigned the split q/k/v slices (and the
    renamed entries) to discarded locals instead of writing them back into
    ``orig_state_dict``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            # attention mask buffers are recomputed by the HF model
            continue
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"""
                ] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"""
                ] = val[
                    :dim
                ]
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"""
                ] = val[
                    dim : dim * 2
                ]
                orig_state_dict[
                    f"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"""
                ] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path ):
    """Convert a timm Swin checkpoint to HF format, sanity-check logits, and save it.

    Downloads a COCO test image, runs both the timm model and the converted HF
    model, asserts their logits agree within 1e-3, then saves the model and
    image processor to ``pytorch_dump_folder_path``.

    Fix: the obfuscated original bound every intermediate to throwaway locals
    while reading them back through the intended names (NameError at runtime);
    the lost boolean arguments are restored (``pretrained=True``, ``stream=True``).
    """
    timm_model = timm.create_model(swin_name, pretrained=True )
    timm_model.eval()

    config = get_swin_config(swin_name )
    model = SwinForImageClassification(config )
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model )
    model.load_state_dict(new_state_dict )

    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
    image = Image.open(requests.get(url, stream=True ).raw )
    inputs = image_processor(images=image, return_tensors='pt' )

    timm_outs = timm_model(inputs['pixel_values'] )
    hf_outs = model(**inputs ).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3 )

    print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )

    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: convert the named timm checkpoint and save it (plus its
    # image processor) to --pytorch_dump_folder_path.
    # Fix: the parser and parsed args were bound to an obfuscated name while
    # being used as ``parser`` / ``args`` (NameError at script run).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--swin_name""",
        default="""swin_tiny_patch4_window7_224""",
        type=str,
        help="""Name of the Swin timm model you\'d like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 259 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False  # NOTE(review): obfuscated module flag; presumably a slow-test/feature gate from the upstream diffusers test file — confirm its original name before use
class __snake_case ( unittest.TestCase):
    """Placeholder for fast VersatileDiffusion text-to-image tests; intentionally empty."""
    pass
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase):
    # NOTE(review): obfuscation damage — the three methods below all share the
    # name ``SCREAMING_SNAKE_CASE`` (presumably ``tearDown``, a save/load
    # round-trip test and a full-inference test), so only the last survives,
    # and locals bound to ``_lowerCamelCase`` are read back through their
    # intended names (``pipe``, ``generator``, ``image``...), which are
    # undefined as written. Restore from the upstream diffusers test.
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Release GPU memory between nightly tests (presumably ``tearDown``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Save/reload round-trip: generations must match across ``save_pretrained``."""
        _lowerCamelCase : int = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : str = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Dict = torch.manual_seed(0 )
        _lowerCamelCase : List[Any] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__lowerCAmelCase )
            _lowerCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCAmelCase )
            pipe.to(__lowerCAmelCase )
            pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : int = generator.manual_seed(0 )
        _lowerCamelCase : List[str] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Full fp16 inference: compare an image slice against stored reference values."""
        _lowerCamelCase : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        _lowerCamelCase : int = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
        _lowerCamelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        _lowerCamelCase : Dict = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 83 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()  # make CUDA/cuDNN ops deterministic so tensor comparisons below are reproducible
class UpperCamelCase ( _lowercase , _lowercase , unittest.TestCase ):
    # NOTE(review): obfuscation damage — the two ``_lowercase`` bases are
    # undefined (presumably ``ModelTesterMixin`` and ``UNetTesterMixin``,
    # imported above); the three class attributes below all rebind one mangled
    # name (presumably ``model_class``, ``main_input_name``, ``base_precision``);
    # and locals bound to ``SCREAMING_SNAKE_CASE`` are read back through their
    # intended names (``batch_size``, ``model``, ``out`` ...), which are
    # undefined as written. Restore from the upstream diffusers test.
    __UpperCamelCase =AutoencoderKL
    __UpperCamelCase ="sample"
    __UpperCamelCase =1e-2
    @property
    def UpperCamelCase ( self : Union[str, Any] ):
        """Random 4x3x32x32 image batch used as the model's dummy input."""
        SCREAMING_SNAKE_CASE = 4
        SCREAMING_SNAKE_CASE = 3
        SCREAMING_SNAKE_CASE = (3_2, 3_2)
        SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
        return {"sample": image}
    @property
    def UpperCamelCase ( self : List[Any] ):
        """Expected input shape (C, H, W)."""
        return (3, 3_2, 3_2)
    @property
    def UpperCamelCase ( self : Dict ):
        """Expected output shape (C, H, W)."""
        return (3, 3_2, 3_2)
    def UpperCamelCase ( self : Optional[int] ):
        """Return (init kwargs, inputs) for a tiny two-block AutoencoderKL."""
        SCREAMING_SNAKE_CASE = {
            '''block_out_channels''': [3_2, 6_4],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        SCREAMING_SNAKE_CASE = self.dummy_input
        return init_dict, inputs_dict
    def UpperCamelCase ( self : int ):
        # Intentionally skipped mixin test (body deliberately empty).
        pass
    def UpperCamelCase ( self : Optional[int] ):
        # Intentionally skipped mixin test (body deliberately empty).
        pass
    @unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
    def UpperCamelCase ( self : List[Any] ):
        """Gradient checkpointing must not change the loss or parameter gradients."""
        SCREAMING_SNAKE_CASE = self.prepare_init_args_and_inputs_for_common()
        SCREAMING_SNAKE_CASE = self.model_class(**__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        assert not model.is_gradient_checkpointing and model.training
        SCREAMING_SNAKE_CASE = model(**__lowerCAmelCase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        SCREAMING_SNAKE_CASE = torch.randn_like(__lowerCAmelCase )
        SCREAMING_SNAKE_CASE = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        SCREAMING_SNAKE_CASE = self.model_class(**__lowerCAmelCase )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(__lowerCAmelCase )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        SCREAMING_SNAKE_CASE = model_a(**__lowerCAmelCase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        SCREAMING_SNAKE_CASE = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        SCREAMING_SNAKE_CASE = dict(model.named_parameters() )
        SCREAMING_SNAKE_CASE = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
    def UpperCamelCase ( self : str ):
        """Loading a pretrained dummy VAE must report no missing keys and run forward."""
        SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=__lowerCAmelCase )
        self.assertIsNotNone(__lowerCAmelCase )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(__lowerCAmelCase )
        SCREAMING_SNAKE_CASE = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def UpperCamelCase ( self : List[Any] ):
        """Seeded forward pass of the dummy VAE must match per-device reference slices."""
        SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
        SCREAMING_SNAKE_CASE = model.to(__lowerCAmelCase )
        model.eval()
        if torch_device == "mps":
            SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
        SCREAMING_SNAKE_CASE = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        SCREAMING_SNAKE_CASE = image.to(__lowerCAmelCase )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE = model(__lowerCAmelCase , sample_posterior=__lowerCAmelCase , generator=__lowerCAmelCase ).sample
        SCREAMING_SNAKE_CASE = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            SCREAMING_SNAKE_CASE = torch.tensor(
                [
                    -4.00_78E-01,
                    -3.83_23E-04,
                    -1.26_81E-01,
                    -1.14_62E-01,
                    2.00_95E-01,
                    1.08_93E-01,
                    -8.82_47E-02,
                    -3.03_61E-01,
                    -9.86_44E-03,
                ] )
        elif torch_device == "cpu":
            SCREAMING_SNAKE_CASE = torch.tensor(
                [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
        else:
            SCREAMING_SNAKE_CASE = torch.tensor(
                [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
        self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1E-2 ) )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any , snake_case__ : Tuple , snake_case__ : Any ):
"""simple docstring"""
return F"""gaussian_noise_s={seed}_shape={'_'.join([str(__lowerCAmelCase ) for s in shape] )}.npy"""
def UpperCamelCase ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : Dict=0 , snake_case__ : Tuple=(4, 3, 5_1_2, 5_1_2) , snake_case__ : Tuple=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE = torch.from_numpy(load_hf_numpy(self.get_file_format(__lowerCAmelCase , __lowerCAmelCase ) ) ).to(__lowerCAmelCase ).to(__lowerCAmelCase )
return image
def UpperCamelCase ( self : Dict , snake_case__ : List[str]="CompVis/stable-diffusion-v1-4" , snake_case__ : List[str]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''fp16''' if fpaa else None
SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained(
__lowerCAmelCase , subfolder='vae' , torch_dtype=__lowerCAmelCase , revision=__lowerCAmelCase , )
model.to(__lowerCAmelCase ).eval()
return model
def UpperCamelCase ( self : int , snake_case__ : int=0 ):
"""simple docstring"""
if torch_device == "mps":
return torch.manual_seed(__lowerCAmelCase )
return torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[4_7, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def UpperCamelCase ( self : str , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE = self.get_sd_image(__lowerCAmelCase )
SCREAMING_SNAKE_CASE = self.get_generator(__lowerCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(__lowerCAmelCase , generator=__lowerCAmelCase , sample_posterior=__lowerCAmelCase ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[4_7, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=__lowerCAmelCase )
SCREAMING_SNAKE_CASE = self.get_sd_image(__lowerCAmelCase , fpaa=__lowerCAmelCase )
SCREAMING_SNAKE_CASE = self.get_generator(__lowerCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(__lowerCAmelCase , generator=__lowerCAmelCase , sample_posterior=__lowerCAmelCase ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE = torch.tensor(__lowerCAmelCase )
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[4_7, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE = self.get_sd_image(__lowerCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(__lowerCAmelCase ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[1_3, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[3_7, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE = self.get_sd_image(__lowerCAmelCase , shape=(3, 4, 6_4, 6_4) )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model.decode(__lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().cpu()
SCREAMING_SNAKE_CASE = torch.tensor(__lowerCAmelCase )
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[2_7, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[1_6, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=__lowerCAmelCase )
SCREAMING_SNAKE_CASE = self.get_sd_image(__lowerCAmelCase , shape=(3, 4, 6_4, 6_4) , fpaa=__lowerCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model.decode(__lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE = torch.tensor(__lowerCAmelCase )
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=5E-3 )
@parameterized.expand([(1_3,), (1_6,), (2_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def UpperCamelCase ( self : Any , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=__lowerCAmelCase )
SCREAMING_SNAKE_CASE = self.get_sd_image(__lowerCAmelCase , shape=(3, 4, 6_4, 6_4) , fpaa=__lowerCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model.decode(__lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model.decode(__lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=1E-1 )
@parameterized.expand([(1_3,), (1_6,), (3_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE = self.get_sd_image(__lowerCAmelCase , shape=(3, 4, 6_4, 6_4) )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model.decode(__lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model.decode(__lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2]
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[4_7, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE = self.get_sd_image(__lowerCAmelCase )
SCREAMING_SNAKE_CASE = self.get_generator(__lowerCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model.encode(__lowerCAmelCase ).latent_dist
SCREAMING_SNAKE_CASE = dist.sample(generator=__lowerCAmelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
SCREAMING_SNAKE_CASE = sample[0, -1, -3:, -3:].flatten().cpu()
SCREAMING_SNAKE_CASE = torch.tensor(__lowerCAmelCase )
SCREAMING_SNAKE_CASE = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase )
| 439 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowerCAmelCase__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ = ort.RunOptions()
lowerCAmelCase__ = 128
lowerCAmelCase__ = 1
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = 2000
lowerCAmelCase__ = {}
for iter in range(max_iters):
lowerCAmelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Lazy-import structure: maps submodule name -> public names it provides.
# NOTE(review): the obfuscated original bound every list to the same
# ``_snake_case`` name and then referenced the undefined ``_import_structure``;
# restored to the standard transformers lazy-init pattern.
_import_structure = {
    '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_resnet'''] = [
        '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ResNetForImageClassification''',
        '''ResNetModel''',
        '''ResNetPreTrainedModel''',
        '''ResNetBackbone''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_resnet'''] = [
        '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFResNetForImageClassification''',
        '''TFResNetModel''',
        '''TFResNetPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_resnet'''] = [
        '''FlaxResNetForImageClassification''',
        '''FlaxResNetModel''',
        '''FlaxResNetPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 340 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def snake_case_ ( x: float, y: float, max_step: int ):
    '''Return a normalized escape-time "distance" for the Mandelbrot point (x, y).

    Iterates z -> z**2 + c with c = x + y*i for at most ``max_step`` steps and
    returns step / (max_step - 1): 1.0 for points that never diverge, values
    near 0 for points that diverge immediately.

    Fix: the obfuscated original declared all three parameters as ``A_`` (a
    SyntaxError) while the body used x / y / max_step; names restored.
    '''
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def snake_case_ ( distance: float ):
    '''Black for points inside the Mandelbrot set (distance == 1), white otherwise.

    Fix: the obfuscated original named the parameter ``A_`` while the body
    read the undefined ``distance``; the name is restored.
    '''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def snake_case_ ( distance: float ):
    '''Black inside the set (distance == 1); otherwise an HSV rainbow colour
    whose hue is the normalized escape distance.

    Fix: the obfuscated original named the parameter ``A_`` while the body
    read the undefined ``distance``; the name is restored consistently.
    '''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance, 1, 1 ) )
def snake_case_ ( image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ):
    '''Render the Mandelbrot set into a PIL RGB image.

    Fix: the obfuscated original declared every parameter as ``A_`` (a
    SyntaxError); names and defaults restored from the body.
    NOTE(review): this calls get_distance / get_color_coded_rgb /
    get_black_and_white_rgb, which the obfuscation renamed elsewhere in this
    file — restore those helper names so the references resolve.
    '''
    img = Image.new('''RGB''', (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates,
            # keeping the figure's aspect ratio tied to the image's
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCAmelCase__ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 83 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
    """Builds tiny RegNet configs and random pixel inputs for the unit tests.

    NOTE(review): identifiers in this block are machine-obfuscated — every
    method is named ``_a`` (later definitions shadow earlier ones),
    ``__init__`` repeats the parameter name ``_snake_case`` (a SyntaxError as
    written), and bodies reference undefined names such as
    ``__lowerCAmelCase`` and ``config_and_inputs``. Restore the original
    identifiers before this class can run; docstrings below record intent.
    """
    def __init__( self : Optional[Any] , _snake_case : Optional[int] , _snake_case : Union[str, Any]=3 , _snake_case : List[Any]=32 , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=10 , _snake_case : Union[str, Any]=[10, 20, 30, 40] , _snake_case : Any=[1, 1, 2, 1] , _snake_case : Tuple=True , _snake_case : Union[str, Any]=True , _snake_case : Tuple="relu" , _snake_case : Tuple=3 , _snake_case : List[str]=None , ):
        """Store the test hyper-parameters (evidently: parent, batch_size,
        image_size, num_channels, embeddings_size, hidden_sizes, depths,
        is_training, use_labels, hidden_act, num_labels, scope)."""
        A__ = parent
        A__ = batch_size
        A__ = image_size
        A__ = num_channels
        A__ = embeddings_size
        A__ = hidden_sizes
        A__ = depths
        A__ = is_training
        A__ = use_labels
        A__ = hidden_act
        A__ = num_labels
        A__ = scope
        A__ = len(__lowerCAmelCase )
    def _a ( self : Any ):
        """Create a random pixel tensor, optional labels, and a config."""
        A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size] , self.num_labels )
        A__ = self.get_config()
        return config, pixel_values, labels
    def _a ( self : List[str] ):
        """Build a RegNetConfig from the stored hyper-parameters."""
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def _a ( self : List[str] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : List[str] ):
        """Forward a bare RegNetModel and check the last-hidden-state shape."""
        A__ = RegNetModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        A__ = model(__lowerCAmelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def _a ( self : Dict , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : List[str] ):
        """Forward RegNetForImageClassification and check the logits shape."""
        A__ = self.num_labels
        A__ = RegNetForImageClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        A__ = model(__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _a ( self : int ):
        """Return (config, inputs_dict) for the shared ModelTesterMixin tests."""
        A__ = self.prepare_config_and_inputs()
        A__ = config_and_inputs
        A__ = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common-suite tests for RegNet (base model + image-classification head).

    NOTE(review): machine-obfuscated — every test method is named ``_a`` (so
    only the last definition survives at runtime) and the bodies reference
    undefined names (``RegNetModelTester``, ``__lowerCAmelCase``). Restore
    the original identifiers before this suite can run.
    """
    A__ : Any = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    A__ : int = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    A__ : Dict = False
    A__ : List[Any] = False
    A__ : Optional[int] = False
    A__ : List[str] = False
    def _a ( self : Dict ):
        """setUp: build the model tester and a ConfigTester for RegNetConfig."""
        A__ = RegNetModelTester(self )
        A__ = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
    def _a ( self : int ):
        """Run the standard battery of config serialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _a ( self : str ):
        """Intentionally empty (common-properties hook)."""
        return
    @unittest.skip(reason='RegNet does not use inputs_embeds' )
    def _a ( self : Dict ):
        """Skipped: RegNet is a vision model without inputs_embeds."""
        pass
    @unittest.skip(reason='RegNet does not support input and output embeddings' )
    def _a ( self : int ):
        """Skipped: RegNet has no token embedding tables."""
        pass
    def _a ( self : Dict ):
        """Check the forward signature starts with `pixel_values`."""
        A__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ = model_class(__lowerCAmelCase )
            A__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ = [*signature.parameters.keys()]
            A__ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
    def _a ( self : Tuple ):
        """Shape test for the base model."""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )
    def _a ( self : Optional[Any] ):
        """Check BatchNorm/GroupNorm layers initialize to weight=1, bias=0."""
        A__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ = model_class(config=__lowerCAmelCase )
            for name, module in model.named_modules():
                if isinstance(__lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    def _a ( self : int ):
        """Check hidden-state count and spatial shapes for both layer types."""
        def check_hidden_states_output(_snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Dict ):
            A__ = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                A__ = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
            A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            A__ = self.model_tester.num_stages
            self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        A__ = self.model_tester.prepare_config_and_inputs_for_common()
        A__ = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                A__ = layer_type
                A__ = True
                check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                A__ = True
                check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    def _a ( self : str ):
        """Shape test for the image-classification head."""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
    @slow
    def _a ( self : List[Any] ):
        """Smoke-test loading the first pretrained RegNet checkpoint."""
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ = RegNetModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def A ( ) -> Tuple:
    """Load the standard COCO cats fixture image used by the integration test.

    Fix: the obfuscated original bound the image to a throwaway name and then
    returned the undefined ``image``; the variable is now bound and returned.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    """Integration test: run a pretrained RegNet classifier on the COCO cats
    image and compare the first logits against reference values.

    NOTE(review): obfuscated — both methods are named ``_a`` and the bodies
    reference the undefined ``__lowerCAmelCase``; restore the original
    identifiers before running.
    """
    @cached_property
    def _a ( self : List[Any] ):
        """Image processor for the first pretrained RegNet checkpoint (or None
        when vision deps are unavailable)."""
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def _a ( self : List[str] ):
        """End-to-end forward pass on the fixture image; checks logits shape
        (1, 1000) and the first three logit values."""
        A__ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCAmelCase )
        A__ = self.default_image_processor
        A__ = prepare_img()
        A__ = image_processor(images=__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            A__ = model(**__lowerCAmelCase )
        # verify the logits
        A__ = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
        A__ = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 9 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def snake_case_ ( tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt" ):
    '''Tokenize a single line, padded/truncated to ``max_length``.

    Fix: the obfuscated original declared six parameters all named ``A_``;
    names restored from usage. The Bart-specific ``add_prefix_space`` guard
    mirrors the upstream RAG utilities — confirm ``BartTokenizer`` is the
    intended isinstance target.
    '''
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer, BartTokenizer ) and not line.startswith(''' ''' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding='''max_length''' if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def snake_case_ ( input_ids, pad_token_id, attention_mask=None, ):
    '''Drop the columns of ``input_ids`` that are padding in every row.

    Returns the trimmed ids alone, or a (ids, mask) pair when an attention
    mask is supplied (trimmed with the same column mask).

    Fix: the obfuscated original declared duplicate ``A_`` parameters; names
    restored from usage.
    '''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case ( _lowercase):
    """Line-based seq2seq dataset: reads parallel ``<type_path>.source`` /
    ``<type_path>.target`` files lazily via ``linecache``.

    NOTE(review): machine-obfuscated — ``__init__`` repeats the parameter name
    ``__lowerCAmelCase`` (a SyntaxError as written) and bodies reference that
    placeholder; evident originals: (data_dir, tokenizer, max_source_length,
    max_target_length, type_path, n_obs, src_lang, tgt_lang, prefix).
    """
    def __init__( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple="train" , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Union[str, Any]="" , ):
        """Resolve the source/target file paths and cache per-line lengths."""
        super().__init__()
        _lowerCamelCase : Optional[int] = Path(__lowerCAmelCase ).joinpath(type_path + '''.source''' )
        _lowerCamelCase : List[str] = Path(__lowerCAmelCase ).joinpath(type_path + '''.target''' )
        _lowerCamelCase : List[Any] = self.get_char_lens(self.src_file )
        _lowerCamelCase : Optional[int] = max_source_length
        _lowerCamelCase : Optional[Any] = max_target_length
        assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
        _lowerCamelCase : List[Any] = tokenizer
        _lowerCamelCase : List[Any] = prefix
        if n_obs is not None:
            _lowerCamelCase : List[str] = self.src_lens[:n_obs]
        _lowerCamelCase : int = src_lang
        _lowerCamelCase : Union[str, Any] = tgt_lang
    def __len__( self : int ):
        """Number of usable source lines (possibly truncated by n_obs)."""
        return len(self.src_lens )
    def __getitem__( self : Dict , __lowerCAmelCase : Optional[Any] ):
        """Tokenize one source/target line pair into input ids and masks."""
        _lowerCamelCase : str = index + 1  # linecache starts at 1
        _lowerCamelCase : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) , __lowerCAmelCase ).rstrip('''\n''' )
        _lowerCamelCase : Optional[Any] = linecache.getline(str(self.tgt_file ) , __lowerCAmelCase ).rstrip('''\n''' )
        assert source_line, f'''empty source line for index {index}'''
        assert tgt_line, f'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , __lowerCAmelCase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        _lowerCamelCase : Optional[int] = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        )
        _lowerCamelCase : Union[str, Any] = self.tokenizer.generator if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        _lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_source_length , '''right''' )
        _lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_target_length , '''right''' )
        _lowerCamelCase : Optional[Any] = source_inputs['''input_ids'''].squeeze()
        _lowerCamelCase : Union[str, Any] = target_inputs['''input_ids'''].squeeze()
        _lowerCamelCase : Any = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : str ):
        """Character length of every line in the given file."""
        return [len(__lowerCAmelCase ) for x in Path(__lowerCAmelCase ).open().readlines()]
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Any ):
        """Collate a list of examples: stack tensors and trim shared padding."""
        _lowerCamelCase : List[Any] = torch.stack([x['''input_ids'''] for x in batch] )
        _lowerCamelCase : Tuple = torch.stack([x['''attention_mask'''] for x in batch] )
        _lowerCamelCase : Union[str, Any] = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        _lowerCamelCase : Tuple = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        _lowerCamelCase : Tuple = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        _lowerCamelCase : Union[str, Any] = trim_batch(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase , _lowerCamelCase : List[str] = trim_batch(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
lowerCAmelCase__ = getLogger(__name__)  # module logger, used by set_extra_model_params below
def snake_case_ ( A_ : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(A_ ) )
def snake_case_ ( A_ : str ):
    '''Snapshot the current git state into ``<A_>/git_log.json``.'''
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(A_, '''git_log.json''' ) )
def snake_case_ ( content, path, indent=4, **json_dump_kwargs ):
    '''Serialize ``content`` as JSON to the file at ``path``.

    Fix: the obfuscated original declared four parameters all named ``A_``;
    names restored from usage. Extra keyword arguments are forwarded to
    ``json.dump``.
    '''
    with open(path, '''w''' ) as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs )
def snake_case_ ( A_ : Any ):
'''simple docstring'''
with open(A_ ) as f:
return json.load(A_ )
def snake_case_ ( ):
    '''Return repo path, commit sha, branch and hostname for the current repo.

    Fix: the obfuscated original passed the undefined ``A_`` as
    ``search_parent_directories``; the upstream utility passes True so the
    repo is found from any working subdirectory.
    '''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def snake_case_ ( f, x ):
    '''Apply ``f`` to every element of ``x`` and return a list (list(map(...))).

    Fix: the obfuscated original declared both parameters as ``A_``.
    '''
    return list(map(f, x ) )
def snake_case_ ( obj, path ):
    '''Pickle ``obj`` to the (binary) file at ``path``.

    Fix: the obfuscated original declared both parameters as ``A_``.
    '''
    with open(path, '''wb''' ) as f:
        return pickle.dump(obj, f )
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
def remove_articles(A_ : str ):
return re.sub(R'''\b(a|an|the)\b''', ''' ''', A_ )
def white_space_fix(A_ : Any ):
return " ".join(text.split() )
def remove_punc(A_ : List[Any] ):
_lowerCamelCase : Any = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(A_ : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(A_ ) ) ) )
def snake_case_ ( prediction, ground_truth ):
    '''Token-level F1 between two strings after SQuAD normalization.

    Fix: the obfuscated original declared both parameters as ``A_``.
    NOTE(review): relies on ``normalize_answer``, whose definition in this
    file was renamed by the obfuscation — restore that name to resolve.
    '''
    pred_tokens = normalize_answer(prediction ).split()
    gold_tokens = normalize_answer(ground_truth ).split()
    common = Counter(pred_tokens ) & Counter(gold_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens )
    recall = 1.0 * num_same / len(gold_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def snake_case_ ( prediction, ground_truth ):
    '''True iff the two strings are equal after SQuAD normalization.

    Fix: the obfuscated original declared both parameters as ``A_``.
    NOTE(review): relies on ``normalize_answer``, renamed elsewhere by the
    obfuscation — restore that name to resolve.
    '''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def snake_case_ ( output_lns, reference_lns ):
    '''Average exact-match score over paired prediction/reference lines.

    Fix: the obfuscated original declared both parameters as ``A_``.
    NOTE(review): relies on ``exact_match_score``, renamed elsewhere by the
    obfuscation — restore that name to resolve.
    '''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns, reference_lns ):
        em += exact_match_score(hypo, pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def snake_case_ ( model_prefix ):
    '''True iff the model prefix denotes a RAG model (starts with "rag").

    Fix: the obfuscated original named the parameter ``A_`` while the body
    read the undefined ``model_prefix``; the name is restored.
    '''
    return model_prefix.startswith('''rag''' )
def snake_case_ ( extra_params, hparams, config ):
    '''Move recognised extra params from ``hparams`` onto ``config``.

    For each name in ``extra_params`` that is set on ``hparams``: if the
    config has neither the name nor its known equivalent, log and drop it;
    otherwise copy the value onto the config and remove it from ``hparams``.
    Returns the (mutated) ``hparams`` and ``config``.

    Fix: the obfuscated original declared duplicate ``A_`` parameters and
    collapsed several assignment targets; reconstructed per the upstream RAG
    utilities.
    '''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams, p, None ):
            if not hasattr(config, p ) and not hasattr(config, equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams, p )
                continue
            set_p = p if hasattr(config, p ) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p ) )
            delattr(hparams, p )
    return hparams, config
| 83 | 0 |
class __magic_name__ :
    '''A named item with a value and a weight, for the greedy knapsack demo.

    NOTE(review): identifiers reconstructed — the obfuscated original declared
    duplicate ``_a`` parameters in ``__init__``, assigned attributes to a
    scrambled local name, and renamed every accessor to the same
    ``SCREAMING_SNAKE_CASE__``; the accessor names below match how the sibling
    knapsack helpers call them.
    '''
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        # Value density (value per unit weight), useful as a greedy key.
        return self.value / self.weight
def SCREAMING_SNAKE_CASE ( name, value, weight ):
    '''Build a list of items from parallel name/value/weight lists.

    Fixes: the obfuscated original declared three duplicate parameters and
    constructed the undefined ``Things``; the item class defined in this file
    is ``__magic_name__``, so that name is used.
    '''
    menu = []
    for i in range(len(name ) ):
        menu.append(__magic_name__(name[i], value[i], weight[i] ) )
    return menu
def SCREAMING_SNAKE_CASE ( items, max_cost, key_func ):
    '''Greedy 0/1 knapsack: consider items in decreasing ``key_func`` order and
    take each one whose weight still fits within ``max_cost``.

    Returns (chosen_items, total_value).

    Fixes from the obfuscated original: duplicate parameter names, and the
    broken single-target ``x = 0.0, 0.0`` assignment that left the running
    ``total_value`` / ``total_cost`` accumulators undefined.
    '''
    items_copy = sorted(items, key=key_func, reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
    # NOTE(review): placeholder entry point — the upstream greedy-knapsack demo
    # leaves this empty; the `List[Any]` return annotation is obfuscation residue.
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the obfuscation bound both the logger and the pretrained-config
# map to the same name, so the second assignment clobbers the first — the
# original used two distinct names (a logger and an archive map).
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class __snake_case ( _lowercase):
    """Configuration for CamemBERT models (a RoBERTa-style architecture).

    Fix: the obfuscated original declared every ``__init__`` parameter as
    ``__lowerCAmelCase`` (a SyntaxError) and collapsed the attribute targets;
    names and defaults are reconstructed from the assignments and the
    upstream ``CamembertConfig``.
    """
    snake_case__ : Optional[Any] = "camembert"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """Store the transformer hyper-parameters; token-id kwargs are passed
        through to the base config."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( _lowercase):
    @property
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Dynamic-axis names for this model's ONNX input tensors; the
        multiple-choice task adds a `choice` axis between batch and sequence."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        input_names = ('''input_ids''', '''attention_mask''')
        return OrderedDict((name, dynamic_axis) for name in input_names )
| 83 | 0 |
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( u, p ) -> int:
    '''Return u * (u-1) * ... * (u-p+1), the falling product used by Newton's
    forward-difference interpolation.

    Fix: the obfuscated original declared both parameters as
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError); names restored from usage.
    '''
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
    '''Interactively run Newton forward-difference interpolation over values
    read from stdin, printing the interpolated result.

    Fix: the obfuscated original referenced the undefined ``A_`` throughout
    and collapsed the difference-table assignment targets; reconstructed from
    the upstream TheAlgorithms script.
    NOTE(review): ``ucal`` below was renamed by the obfuscation elsewhere in
    this file — restore that helper's name for the call to resolve.
    '''
    n = int(input('enter the numbers of values: ' ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
    y[0][0] = 0
    print('enter the values of parameters in a list: ' )
    x = list(map(int , input().split() ) )
    print('enter the values of corresponding parameters: ' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('enter the value to interpolate: ' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
| 345 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Markers inserted around converted snippets that still need human review.
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''

# tfds constructs that cannot be converted automatically and must be flagged.
# (The class below reads these canonical names; the garbled original bound all
# four constants to the single name `lowerCAmelCase__`, one after another.)
TO_HIGHLIGHT = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]

# (regex pattern, replacement) pairs applied in order to each converted line.
TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]

# Backward-compatible alias: the previous code left this name bound to the
# conversion table (its final rebinding), so preserve that value.
lowerCAmelCase__ = TO_CONVERT
def snake_case_ ( A_ : Namespace ):
    """Factory registered as the argparse default `func`: build the convert
    command from parsed CLI arguments.

    Fixes a NameError: the body referenced `args`, but the parameter is `A_`.
    NOTE(review): `ConvertCommand` is the class defined just below (garbled to
    `__snake_case` in this file) — verify binding.
    """
    return ConvertCommand(A_.tfds_path , A_.datasets_directory )
class __snake_case ( _lowercase):
    """CLI command that converts a TensorFlow Datasets (tfds) script into a
    HuggingFace Datasets script by rewriting imports and common tfds idioms.

    Restored: the garbled original renamed every assignment target (e.g.
    `train_parser`, `abs_tfds_path`, `out_line`) while keeping the original
    names at the use sites, and repeated `__init__` parameter names (a
    SyntaxError).
    """

    @staticmethod
    def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : ArgumentParser ):
        """Register the `convert` sub-command and its arguments on the parser."""
        train_parser = __lowerCAmelCase.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=str , required=True , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=str , required=True , help='''Path to the HuggingFace Datasets folder.''' )
        # Wire the factory (module-level `snake_case_`) so `args.func(args)` works.
        train_parser.set_defaults(func=snake_case_ )

    def __init__( self , tfds_path : str , datasets_directory : str , *args ):
        """Store the source tfds path and the target datasets directory."""
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def SCREAMING_SNAKE_CASE ( self ):
        """Run the conversion: rewrite each eligible .py file, place builder
        scripts in per-dataset directories, and copy shared utility files."""
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''' )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(input_file , encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    # Fixed: the lambda's parameter and its use did not match.
                    to_remove_references = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove_references ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    out_line = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace('''.py''' , '''''' )
                output_dir = os.path.join(abs_datasets_path , dataset_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(f'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
            self._logger.info(f'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 83 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the RoBERTa model family: names are declared in
# `_import_structure` and only materialized on first attribute access via
# `_LazyModule`; the TYPE_CHECKING branch gives static analyzers real imports.
# Restored: the garbled original rebound one variable (`UpperCamelCase_`) for
# every structure entry — clobbering the dict — and then referenced the
# undefined `_import_structure` and dropped the `sys.modules` assignment.
_import_structure = {
    'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
    'tokenization_roberta': ['RobertaTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer requires the `tokenizers` library.
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roberta'] = [
        'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RobertaForCausalLM',
        'RobertaForMaskedLM',
        'RobertaForMultipleChoice',
        'RobertaForQuestionAnswering',
        'RobertaForSequenceClassification',
        'RobertaForTokenClassification',
        'RobertaModel',
        'RobertaPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_roberta'] = [
        'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRobertaForCausalLM',
        'TFRobertaForMaskedLM',
        'TFRobertaForMultipleChoice',
        'TFRobertaForQuestionAnswering',
        'TFRobertaForSequenceClassification',
        'TFRobertaForTokenClassification',
        'TFRobertaMainLayer',
        'TFRobertaModel',
        'TFRobertaPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_roberta'] = [
        'FlaxRobertaForCausalLM',
        'FlaxRobertaForMaskedLM',
        'FlaxRobertaForMultipleChoice',
        'FlaxRobertaForQuestionAnswering',
        'FlaxRobertaForSequenceClassification',
        'FlaxRobertaForTokenClassification',
        'FlaxRobertaModel',
        'FlaxRobertaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers the
    # deferred imports declared above.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 625 |
"""simple docstring"""
def snake_case_ ( A_ : list, A_ : list, A_ : int, A_ : int, A_ : int ):
'''simple docstring'''
if index == number_of_items:
return 0
_lowerCamelCase : int = 0
_lowerCamelCase : str = 0
_lowerCamelCase : Dict = knapsack(A_, A_, A_, A_, index + 1 )
if weights[index] <= max_weight:
_lowerCamelCase : Tuple = values[index] + knapsack(
A_, A_, A_, max_weight - weights[index], index + 1 )
return max(A_, A_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCamelCase = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class A ( unittest.TestCase ):
    """Hub round-trip tests for Flax models against the staging endpoint.

    NOTE(review): every method in the garbled original is named `lowerCamelCase`,
    so later defs shadow earlier ones and unittest never discovers them. The
    names are kept to preserve the file's interface, but the bodies are restored
    from their use sites (the original read undefined locals like `model`).
    """

    @classmethod
    def lowerCamelCase ( cls : List[Any] ) -> List[str]:
        # setUpClass: authenticate with the test token.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def lowerCamelCase ( cls : Union[str, Any] ) -> Optional[Any]:
        # tearDownClass: best-effort removal of repos the tests may have created.
        try:
            delete_repo(token=cls._token , repo_id='test-model-flax' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
        except HTTPError:
            pass

    def lowerCamelCase ( self : List[Any] ) -> int:
        """Push a tiny Flax BERT, reload it, and check parameters match."""
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub('test-model-flax' , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=F'''{key} not identical''' )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-model-flax' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir , repo_id='test-model-flax' , push_to_hub=True , use_auth_token=self._token )
            new_model = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
            base_params = flatten_dict(unfreeze(model.params ) )
            new_params = flatten_dict(unfreeze(new_model.params ) )
            for key in base_params.keys():
                max_diff = (base_params[key] - new_params[key]).sum().item()
                self.assertLessEqual(max_diff , 1E-3 , msg=F'''{key} not identical''' )

    def lowerCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
        """Same round-trip, pushing into an organization namespace."""
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=F'''{key} not identical''' )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir , repo_id='valid_org/test-model-flax-org' , push_to_hub=True , use_auth_token=self._token )
            new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
            base_params = flatten_dict(unfreeze(model.params ) )
            new_params = flatten_dict(unfreeze(new_model.params ) )
            for key in base_params.keys():
                max_diff = (base_params[key] - new_params[key]).sum().item()
                self.assertLessEqual(max_diff , 1E-3 , msg=F'''{key} not identical''' )
def a_ ( model_1 , model_2 ):
    """Return True iff two Flax models have numerically identical parameters
    (per-leaf absolute difference summing to <= 1e-4).

    Fixes the garbled original: the signature repeated one parameter name (a
    SyntaxError) and the body compared a flattened dict with itself, which made
    the check always succeed.
    """
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params )
    flat_params_2 = flatten_dict(model_2.params )
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key] ) ) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class A ( unittest.TestCase ):
    """Loading Flax models from a subfolder, locally and from the hub.

    NOTE(review): method names `lowerCamelCase` are kept from the original even
    though they shadow each other; `check_models_equal` is referenced as in the
    original use sites (this garbled file defines it under the name `a_`).
    """

    def lowerCamelCase ( self : int ) -> int:
        """Save into a subfolder; loading without `subfolder=` must fail."""
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
        model = FlaxBertModel(config )
        subfolder = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) )
            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )

    def lowerCamelCase ( self : str ) -> int:
        """Same as above with sharded checkpoints (tiny max_shard_size)."""
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
        model = FlaxBertModel(config )
        subfolder = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) , max_shard_size='10KB' )
            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )

    def lowerCamelCase ( self : Union[str, Any] ) -> Tuple:
        """Hub repo with weights in a subfolder: fails without `subfolder=`."""
        subfolder = '''bert'''
        model_id = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )

    def lowerCamelCase ( self : Union[str, Any] ) -> List[Any]:
        """Sharded variant of the hub subfolder test."""
        subfolder = '''bert'''
        model_id = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
| 464 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_lowercase):
    """Import-error placeholder: constructing it (or calling the classmethod
    stubs, typically `from_config`/`from_pretrained`) raises a clear error
    unless transformers, torch and note_seq are installed.

    Fixes the garbled original, whose signatures repeated `__lowerCAmelCase`
    for both *args and **kwargs — a duplicate-argument SyntaxError.
    NOTE(review): both classmethods share the garbled name, so the second
    shadows the first; names kept to preserve the file's interface.
    """

    # Backends this dummy stands in for.
    snake_case__ : Optional[Any] = ["transformers", "torch", "note_seq"]

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 83 | 0 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _UpperCamelCase ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """Tokenize a single text line with the given padding side.

    Parameter names restored: the garbled signature repeated one name six times
    (a SyntaxError) while the body referenced undefined `A_`/`line` names.
    BART tokenizers get `add_prefix_space=True` unless the line already starts
    with a space.
    """
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(""" """ ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def _UpperCamelCase ( input_ids , pad_token_id , attention_mask=None , ):
    """Drop columns of `input_ids` that are padding in every row.

    Returns the trimmed ids, or an (ids, attention_mask) pair when a mask is
    supplied. Parameter names restored (the garbled signature repeated one
    name, a SyntaxError).
    """
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _snake_case ( _lowercase ):
    """Line-by-line seq2seq dataset: pairs line i of `<type_path>.source` with
    line i of `<type_path>.target`, tokenizing lazily via `linecache`.

    Restored: the garbled original repeated a parameter name in `__init__` (a
    SyntaxError) and dropped the `self.`/tuple assignment targets; names come
    from the surviving use sites. `encode_line`/`trim_batch` are the sibling
    helpers defined above in this file.
    """

    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + """.source""" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + """.target""" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            # Optionally cap the dataset to the first n_obs examples.
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__( self ):
        return len(self.src_lens )

    def __getitem__( self , index ):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("""\n""" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("""\n""" )
        assert source_line, f'''empty source line for index {index}'''
        assert tgt_line, f'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , TaTokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right; RAG wraps a question encoder and
        # a generator tokenizer, so pick the appropriate one for each side.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , TaTokenizer ) is False and isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , """right""" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , """right""" )
        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens( data_file ):
        # Renamed from the garbled `snake_case__` so the `self.get_char_lens`
        # call in `__init__` resolves; returns per-line character counts.
        return [len(x ) for x in Path(data_file ).open().readlines()]

    def snake_case__ ( self , batch ):
        """Collate: stack examples and trim all-pad columns (name kept)."""
        input_ids = torch.stack([x["""input_ids"""] for x in batch] )
        masks = torch.stack([x["""attention_mask"""] for x in batch] )
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
__A =getLogger(__name__)
def _UpperCamelCase ( UpperCamelCase__ ):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    Fixes a NameError: the body referenced `A_` instead of the parameter.
    """
    return list(itertools.chain.from_iterable(UpperCamelCase__ ) )
def _UpperCamelCase ( UpperCamelCase__ ):
    """Write the current git metadata to `<UpperCamelCase__>/git_log.json`.

    NOTE(review): `get_git_info` and `save_json` are the sibling helpers of
    this file (both garbled to `_UpperCamelCase` here) — verify bindings.
    """
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(UpperCamelCase__ , """git_log.json""" ) )
def _UpperCamelCase ( content , path , indent=4 , **json_dump_kwargs ):
    """Serialize `content` as JSON to `path` (default 4-space indent).

    Parameter names restored: the garbled signature repeated one name (a
    SyntaxError).
    """
    with open(path , """w""" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def _UpperCamelCase ( UpperCamelCase__ ):
    """Load and return the JSON document stored at path `UpperCamelCase__`.

    Fixes a NameError: the body opened and parsed the undefined name `A_`.
    """
    with open(UpperCamelCase__ ) as f:
        return json.load(f )
def _UpperCamelCase ( ):
    """Return repo id / commit sha / branch / hostname for experiment logging.

    Requires running inside a git checkout; uses GitPython (`git` imported at
    file top). Restored the `search_parent_directories=True` flag and the
    `str(repo)` id, which the garbling replaced with undefined `A_`.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def _UpperCamelCase ( fn , iterable ):
    """Eager `map`: return list(map(fn, iterable)).

    Parameter names restored (the garbled signature repeated one name).
    """
    return list(map(fn , iterable ) )
def _UpperCamelCase ( obj , path ):
    """Pickle `obj` to binary file `path`.

    Parameter names restored (the garbled signature repeated one name).
    """
    with open(path , """wb""" ) as f:
        return pickle.dump(obj , f )
def _UpperCamelCase ( UpperCamelCase__ ):
    """SQuAD-style answer normalization: lowercase, strip punctuation, drop
    the articles a/an/the, and collapse whitespace.

    Fixes NameErrors: the inner helpers referenced `A_`/`text` instead of
    their own parameters.
    """

    def remove_articles(text ):
        return re.sub(R"""\b(a|an|the)\b""" , """ """ , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase__ ) ) ) )
def _UpperCamelCase ( prediction , ground_truth ):
    """Token-level F1 between normalized prediction and ground truth; 0 when
    they share no tokens.

    Parameter names restored (duplicate-name SyntaxError in the original).
    NOTE(review): `normalize_answer` is the sibling helper above (garbled to
    `_UpperCamelCase` in this file) — verify binding.
    """
    pred_tokens = normalize_answer(prediction ).split()
    gold_tokens = normalize_answer(ground_truth ).split()
    common = Counter(pred_tokens ) & Counter(gold_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens )
    recall = 1.0 * num_same / len(gold_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def _UpperCamelCase ( prediction , ground_truth ):
    """True iff prediction equals ground truth after normalization.

    Parameter names restored (duplicate-name SyntaxError in the original);
    `normalize_answer` is the sibling helper above.
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def _UpperCamelCase ( output_lns , reference_lns ):
    """Corpus-level exact match in [0, 1], returned as {"em": score}.

    Parameter names restored (duplicate-name SyntaxError in the original);
    `exact_match_score` is the sibling helper above.
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def _UpperCamelCase ( model_prefix ):
    """True iff the model prefix denotes a RAG model.

    Fixes a NameError: the garbling renamed the parameter while the body kept
    the original name `model_prefix`.
    """
    return model_prefix.startswith("""rag""" )
def _UpperCamelCase ( extra_params , hparams , config ):
    """Move each param in `extra_params` from `hparams` onto `config`, mapping
    T5's `dropout` -> `dropout_rate`; params the config cannot accept are
    logged and dropped. Returns the mutated (hparams, config) pair.

    Restored: the garbled signature repeated one parameter name (a
    SyntaxError) and the body referenced undefined `A_` names.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("""config doesn\'t have a `{}` attribute""".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase):
    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Integration check: mT5-small negative log-likelihood on a fixed
        input/label pair matches the reference Mesh-TF score.

        Restored: the garbled original assigned every result to throwaway
        locals and then read undefined names (`model`, `labels`, `logits`...).
        """
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 83 | 0 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__lowerCamelCase : str = datasets.logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__lowerCamelCase : str = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
__lowerCamelCase : Union[str, Any] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    """Extract per-document coreference cluster info for the CoVal scorer.

    Returns a dict mapping doc id -> (key_clusters, sys_clusters,
    key_mention_sys_cluster, sys_mention_key_cluster) as consumed by the
    ``evaluate`` function below.

    NOTE: the obfuscated original named every parameter
    ``__SCREAMING_SNAKE_CASE`` (a SyntaxError) and referenced undefined
    ``A_`` names; parameter/local names are restored from body usage and the
    visible call site (``evaluate`` calls ``get_coref_infos``).
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    # Gold (key) mentions, optionally restricted to NPs / minimum spans.
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    # System mentions.
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        # NOTE(review): parse trees are looked up in the *key* lines here,
        # mirroring the upstream CoVal metric code — confirm this is intended.
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
    if not keep_singletons:
        logger.info(
            f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
            '''files, respectively''' )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score system (predicted) against key (gold) CoNLL lines.

    ``metrics`` is a list of ``(name, metric_fn)`` pairs; returns a dict of
    ``{name}/recall|precision|f1`` scores plus ``conll_score`` (the average of
    MUC, B-cubed and CEAFe F1) when all three are present.

    NOTE: the obfuscated original had duplicate parameter names and was itself
    named ``_a``; restored from the visible call site in ``_compute``.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': fa} )
        logger.info(
            name.ljust(10 ) , f'''Recall: {recall * 100:.2f}''' , f''' Precision: {precision * 100:.2f}''' , f''' F1: {fa * 100:.2f}''' , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f'''CoNLL score: {conll:.2f}''' )
        output_scores.update({'''conll_score''': conll} )
    return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True if the key (gold) CoNLL lines carry parse annotation.

    A line counts as parsed when it has more than 6 whitespace-separated
    columns and column 5 (the parse column) is not ``-``. Comment lines
    starting with ``#`` are skipped; the scan stops at the first line that is
    either too short or carries a real parse.

    NOTE: the original signature bound ``__SCREAMING_SNAKE_CASE`` while the
    body read ``key_lines`` (a NameError); the parameter name is restored.
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase ( datasets.Metric):
    """CoVal coreference metric (mentions, MUC, B-cubed, CEAFe, LEA, CoNLL avg).

    NOTE: both methods were obfuscated to the same name (so the second
    shadowed the first); they are restored to ``_info``/``_compute``, the
    hooks ``datasets.Metric`` actually dispatches to. Duplicate parameter
    names in ``_compute`` are restored from the ``evaluate`` call below.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' ) ),
                } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
                '''https://github.com/ns-moosavi/coval''',
                '''https://www.aclweb.org/anthology/P16-1060''',
                '''http://www.conll.cemantix.org/2012/data.html''',
            ] , )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            # min_span requires gold parse information in the key (reference) files.
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 404 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    '''files''', [
        ['''full:README.md''', '''dataset_infos.json'''],
        ['''empty:README.md''', '''dataset_infos.json'''],
        ['''dataset_infos.json'''],
        ['''full:README.md'''],
    ], )
def test_from_dir(tmp_path_factory, files):
    """DatasetInfosDict.from_directory reads dataset_size from README.md
    YAML front matter and/or the legacy dataset_infos.json.

    NOTE: the obfuscated original named both parameters ``A_`` (a
    SyntaxError) and the function ``snake_case_`` (not collected by pytest);
    names are restored from body usage.
    """
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
            f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
            f.write('''''' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''', '''w''' ) as f:
            f.write('''{"default": {"dataset_size": 42}}''' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    '''dataset_info''', [
        DatasetInfo(),
        DatasetInfo(
            description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ),
    ], )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    """A DatasetInfo written to a directory reloads equal to the original.

    NOTE: parameter/function names restored from body usage (the original
    bound two parameters both named ``A_``).
    """
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, '''dataset_info.json''' ) )
def test_dataset_info_to_yaml_dict():
    """_to_yaml_dict keeps exactly the _INCLUDED_INFO_IN_YAML fields and
    round-trips through yaml.safe_dump/safe_load."""
    dataset_info = DatasetInfo(
        description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    """An empty DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    '''dataset_infos_dict''', [
        DatasetInfosDict(),
        DatasetInfosDict({'''default''': DatasetInfo()} ),
        DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
        DatasetInfosDict(
            {
                '''default''': DatasetInfo(
                    description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, )
            } ),
        DatasetInfosDict(
            {
                '''v1''': DatasetInfo(dataset_size=42 ),
                '''v2''': DatasetInfo(dataset_size=13_37 ),
            } ),
    ], )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    """A DatasetInfosDict written to a directory reloads equal to the
    original, modulo fields the YAML representation does not carry.

    NOTE: parameter/function names restored; the obfuscated original bound
    duplicate ``A_`` parameters and dropped the in-loop mutations.
    """
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, '''README.md''' ) )
| 83 | 0 |
from __future__ import annotations
from PIL import Image
# Define glider example
# Define glider example
# NOTE: both grids were assigned to the same obfuscated name; restored to the
# names the __main__ block uses (``generate_images(GLIDER, 16)``).
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute one Conway's Game of Life step for a 2D grid of 0/1 ints.

    NOTE: restored from the obfuscated ``A`` — ``generate_images`` calls
    ``new_generation`` — and local names recovered from usage (the original
    body referenced undefined ``A_``/``next_generation_row``).
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int):
    """Render ``frames`` successive generations of ``cells`` as PIL images.

    Live cells are drawn black, dead cells white. NOTE: the indexing
    ``cells[y][x]`` with x ranging over len(cells) assumes a square grid —
    kept as in the original.

    Restored from the obfuscated duplicate ``A`` (the __main__ block calls
    ``generate_images``) with locals recovered from usage.
    """
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
lowerCamelCase__ = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 122 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    """Builds tiny ASTConfig objects and matching inputs for the model tests.

    NOTE: the obfuscated original named the class ``__snake_case`` while
    ``setUp`` instantiates ``ASTModelTester(self)``, named every __init__
    parameter ``__lowerCAmelCase`` (a SyntaxError) and every method
    ``SCREAMING_SNAKE_CASE``; names are restored from the attribute
    assignments and the call sites (``self.prepare_config_and_inputs()``,
    ``self.get_config()``, etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        patch_size=2,
        max_length=2_4,
        num_mel_bins=1_6,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, input_values, labels) with random spectrogram input."""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            # NOTE(review): the obfuscated original passed an unresolved name
            # for is_decoder; False matches the encoder-only AST model — confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def prepare_config_and_inputs_for_common(self):
        config, input_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(_lowercase , _lowercase , unittest.TestCase):
    """Common-model and pipeline tests for AST.

    NOTE: in the obfuscated original all six class attributes shared one name
    and all eight methods shared another, so only the last of each survived;
    attribute/method names are restored to the conventional
    ModelTesterMixin/unittest hooks (flag names are the standard mixin flags —
    confirm against upstream).
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Audio-classification pipeline tests are covered elsewhere; skip here.
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=3_7 )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio():
    """Download the sample flac used by the integration test and load it.

    Returns (audio_tensor, sampling_rate). Restored from the obfuscated
    ``snake_case_`` — the integration test calls ``prepare_audio()`` — and the
    undefined ``A_`` is replaced by the downloaded file path.
    """
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the finetuned AudioSet checkpoint.

    NOTE: the property name is restored from its read site
    (``self.default_feature_extractor``); the obfuscated original used
    placeholder names throughout.
    """

    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(torch_device)
        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='''pt''' ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 5_2_7) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 83 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE: the obfuscated original assigned both the logger and the archive map
# to the same name, so the logger was immediately shadowed by the dict.
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __UpperCamelCase ( _lowercase ):
    """Configuration for the Donut Swin vision encoder.

    NOTE: the obfuscated original gave both class attributes the same name
    (so ``model_type`` was lost) and named every __init__ parameter ``_A``
    (a SyntaxError); names are restored from the attribute assignments.
    """

    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.0_2,
        layer_norm_eps=1E-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1) )
| 259 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 1_00,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Simulated-annealing search starting from ``search_prob``.

    Wanders to random neighbours, always accepting improvements and accepting
    worse moves with probability e^(change/temperature); the temperature decays
    by ``rate_of_decrease`` each step. Returns the best state seen.

    NOTE: the obfuscated original named every parameter ``A_`` (a
    SyntaxError) and the function ``snake_case_``; restored from body usage
    and the ``simulated_annealing(...)`` call sites below.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores )
        plt.xlabel('''Iterations''' )
        plt.ylabel('''Function values''' )
        plt.show()
    return best_state
if __name__ == "__main__":
def snake_case_ ( A_ : int, A_ : Tuple ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def snake_case_ ( A_ : Optional[int], A_ : List[Any] ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
| 83 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# NOTE: the obfuscated original assigned both values to ``a_``, shadowing the
# logger that the config class below uses (``logger.info``).
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig( _lowercase ):
    """Configuration for the Conditional DETR detection model.

    NOTE: the obfuscated original named this class identically to the ONNX
    config below (which therefore shadowed it), gave all three class
    attributes one name, and named every __init__ parameter ``snake_case__``
    (a SyntaxError). Names are restored from the attribute assignments; the
    class rename should be confirmed against callers outside this file.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=3_0_0,
        encoder_layers=6,
        encoder_ffn_dim=2_0_4_8,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2_0_4_8,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=2_5_6,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig( _lowercase ):
    """ONNX export configuration for Conditional DETR.

    NOTE: renamed from the obfuscated ``UpperCamelCase`` (which collided with
    the model config class above) and the three property names restored to
    the OnnxConfig hooks; confirm the class name against external callers.
    """

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self):
        # Dynamic axes for the two pixel inputs.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )

    @property
    def atol_for_validation(self):
        return 1E-5

    @property
    def default_onnx_opset(self):
        return 1_2
| 439 |
"""simple docstring"""
from collections import namedtuple
# NOTE: the obfuscated original assigned both the namedtuple factory and the
# table to ``lowerCAmelCase__``, while the table and converter reference
# ``from_to`` / ``METRIC_CONVERSION``; names restored from those uses.
from_to = namedtuple('''from_to''', '''from_ to''')

# ``from_`` converts the unit to cubic metres; ``to`` converts cubic metres
# back to the unit.
METRIC_CONVERSION = {
    '''cubicmeter''': from_to(1, 1),
    '''litre''': from_to(0.0_0_1, 1000),
    '''kilolitre''': from_to(1, 1),
    '''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
    '''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
    '''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7),
    '''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}


def snake_case_(value: float, from_type: str, to_type: str):
    """Convert ``value`` between volume units via cubic metres.

    Raises ValueError for an unknown ``from_type`` or ``to_type``.
    (Duplicate ``A_`` parameter names and the ``', '.join(A_)`` references in
    the error messages are restored from usage.)
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 0 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#

# Direct (stable-diffusion key, HF Diffusers key) renames.
unet_conversion_map = [
    ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
    ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
    ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
    ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
    ('''input_blocks.0.0.weight''', '''conv_in.weight'''),
    ('''input_blocks.0.0.bias''', '''conv_in.bias'''),
    ('''out.0.weight''', '''conv_norm_out.weight'''),
    ('''out.0.bias''', '''conv_norm_out.bias'''),
    ('''out.2.weight''', '''conv_out.weight'''),
    ('''out.2.bias''', '''conv_out.bias'''),
]

# (stable-diffusion, HF Diffusers) sub-key renames applied inside resnet blocks.
unet_conversion_map_resnet = [
    ('''in_layers.0''', '''norm1'''),
    ('''in_layers.2''', '''conv1'''),
    ('''out_layers.0''', '''norm2'''),
    ('''out_layers.3''', '''conv2'''),
    ('''emb_layers.1''', '''time_emb_proj'''),
    ('''skip_connection''', '''conv_shortcut'''),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f'''down_blocks.{i}.resnets.{j}.'''
        sd_down_res_prefix = f'''input_blocks.{3*i + j + 1}.0.'''
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f'''down_blocks.{i}.attentions.{j}.'''
            sd_down_atn_prefix = f'''input_blocks.{3*i + j + 1}.1.'''
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f'''up_blocks.{i}.resnets.{j}.'''
        sd_up_res_prefix = f'''output_blocks.{3*i + j}.0.'''
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f'''up_blocks.{i}.attentions.{j}.'''
            sd_up_atn_prefix = f'''output_blocks.{3*i + j}.1.'''
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.conv.'''
        sd_downsample_prefix = f'''input_blocks.{3*(i+1)}.0.op.'''
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = '''mid_block.attentions.0.'''
sd_mid_atn_prefix = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{j}.'''
    sd_mid_res_prefix = f'''middle_block.{2*j}.'''
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    '''Rewrite HF Diffusers UNet keys into original stable-diffusion keys.

    Builds a key mapping (HF key -> SD key) from the module-level conversion
    tables, then re-keys the tensors accordingly.
    '''
    # Start from the identity mapping, then rewrite the HF spellings into SD form.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#

# (stable-diffusion, HF Diffusers)
vae_conversion_map = [
    ('''nin_shortcut''', '''conv_shortcut'''),
    ('''norm_out''', '''conv_norm_out'''),
    ('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f'''encoder.down_blocks.{i}.resnets.{j}.'''
        sd_down_prefix = f'''encoder.down.{i}.block.{j}.'''
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.'''
        sd_downsample_prefix = f'''down.{i}.downsample.'''
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''up.{3-i}.upsample.'''
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f'''decoder.up_blocks.{i}.resnets.{j}.'''
        sd_up_prefix = f'''decoder.up.{3-i}.block.{j}.'''
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{i}.'''
    sd_mid_res_prefix = f'''mid.block_{i+1}.'''
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

# (stable-diffusion, HF Diffusers) — attention sub-key renames.
vae_conversion_map_attn = [
    ('''norm.''', '''group_norm.'''),
    ('''q.''', '''query.'''),
    ('''k.''', '''key.'''),
    ('''v.''', '''value.'''),
    ('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    '''Append two trailing singleton dims so a linear weight matches SD's 1x1-conv layout.'''
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    '''Rewrite HF Diffusers VAE keys into original stable-diffusion keys.

    Also reshapes the mid-block attention q/k/v/proj_out weights, which SD
    stores as 1x1 convolutions rather than linear layers.
    '''
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['''q''', '''k''', '''v''', '''proj_out''']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""" )
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ('''resblocks.''', '''text_model.encoder.layers.'''),
    ('''ln_1''', '''layer_norm1'''),
    ('''ln_2''', '''layer_norm2'''),
    ('''.c_fc.''', '''.fc1.'''),
    ('''.c_proj.''', '''.fc2.'''),
    ('''.attn''', '''.self_attn'''),
    ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
    ('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
    ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
# HF fragment (regex-escaped) -> SD fragment, and one pattern matching any HF fragment.
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('''|'''.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'''q''': 0, '''k''': 1, '''v''': 2}
def convert_text_enc_state_dict_vaa(text_enc_dict):
    '''Convert an HF (OpenCLIP-based, SD 2.x) text-encoder state dict to SD format.

    HF stores separate q/k/v projections; SD expects them fused, so matching
    q/k/v triplets are captured per layer and concatenated into ``in_proj_*``.

    Raises:
        Exception: if a layer is missing one of its q/k/v tensors.
    '''
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight" )
            or k.endswith(".self_attn.k_proj.weight" )
            or k.endswith(".self_attn.v_proj.weight" )
        ):
            k_pre = k[: -len(".q_proj.weight" )]
            k_code = k[-len("q_proj.weight" )]  # single char: 'q', 'k' or 'v'
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias" )
            or k.endswith(".self_attn.k_proj.bias" )
            or k.endswith(".self_attn.v_proj.bias" )
        ):
            k_pre = k[: -len(".q_proj.bias" )]
            k_code = k[-len("q_proj.bias" )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0 ) )], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0 ) )], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0 ) )], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    '''SD 1.x text encoders already use SD key names; return the dict unchanged.'''
    return text_enc_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
_snake_case = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
_snake_case = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
_snake_case = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
_snake_case = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
_snake_case = load_file(unet_path, device='''cpu''')
else:
_snake_case = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
_snake_case = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
_snake_case = load_file(vae_path, device='''cpu''')
else:
_snake_case = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
_snake_case = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
_snake_case = load_file(text_enc_path, device='''cpu''')
else:
_snake_case = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
_snake_case = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
_snake_case = convert_unet_state_dict(unet_state_dict)
_snake_case = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
_snake_case = convert_vae_state_dict(vae_state_dict)
_snake_case = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
_snake_case = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
_snake_case = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
_snake_case = convert_text_enc_state_dict_vaa(text_enc_dict)
_snake_case = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
_snake_case = convert_text_enc_state_dict(text_enc_dict)
_snake_case = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
_snake_case = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
_snake_case = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
_snake_case = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 340 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester(_lowercase):
    """Builds a tiny DeBERTa-v2 config plus random inputs and checks each task head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels shaped for the tiny config above."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        # Loss must be a scalar tensor.
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Run with progressively fewer optional inputs; shape check applies to the last.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice: (batch, num_choices, seq_len).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for DeBERTa-v2 models and pipelines."""

    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # Flags consumed by the common ModelTesterMixin suite.
    # NOTE(review): flag names follow the usual transformers tester convention
    # (the original mangled names were indistinguishable) — confirm against the
    # mixin's attribute list.
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the published deberta-v2-xlarge weights."""

    @unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
        input_ids = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4 ), f'''{output[:, 1:4, 1:4]}''' )
| 83 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
# Module-level logger used by GlueDataset below.
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    max_seq_length: int = field(
        default=1_28, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )

    def __post_init__(self):
        # Task names are looked up case-insensitively in ``glue_processors``.
        self.task_name = self.task_name.lower()
class Split(Enum):
    """Dataset split selector; ``Split[name]`` lookups rely on the Enum base."""

    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    """Deprecated torch Dataset over GLUE task features, with on-disk feature caching."""

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py',
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''',
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '''.lock'''
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
            else:
                logger.info(F'''Creating features from dataset file at {args.data_dir}''' )

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 9 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# Repo id, its local cache directory, and the full hash of its main commit.
RANDOM_BERT = '''hf-internal-testing/tiny-random-bert'''
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
FULL_COMMIT_HASH = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class __snake_case ( unittest.TestCase):
    """Tests for the Hub file-cache helpers (``cached_file``, ``has_file``,
    ``get_file_from_repo``).

    NOTE(review): argument names in this file were machine-mangled — every
    positional argument became the (undefined) name ``__lowerCAmelCase`` and
    results bind throwaway ``_lowerCamelCase`` locals.  Code is kept verbatim;
    only documentation was added.
    """
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Download via ``cached_file`` and check the cache layout
        (blobs/refs/snapshots), that a second call hits the same cached path,
        and that a short revision resolves to the full commit hash."""
        _lowerCamelCase : Optional[Any] = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(__lowerCAmelCase ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : Optional[int] = f.read()
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
        self.assertTrue(os.path.isfile(__lowerCAmelCase ) )
        # File is cached at the same place the second time.
        _lowerCamelCase : Tuple = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        # Using a specific revision to test the full commit hash.
        _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''9b8c223''' )
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Invalid repo ids, git revisions and filenames raise with
        descriptive error messages."""
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            _lowerCamelCase : Optional[int] = cached_file('''tiny-random-bert''' , __lowerCAmelCase )
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            _lowerCamelCase : str = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''aaaa''' )
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : int = cached_file(__lowerCAmelCase , '''conf''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Missing entries: raise vs. return ``None``, the ``.no_exist`` cache
        marker, and graceful handling of an HTTP 500 via a mocked request."""
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , '''conf''' )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : List[Any] = f.read()
        self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''.no_exist''' , __lowerCAmelCase , '''conf''' ) ) )
        _lowerCamelCase : str = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = cached_file(__lowerCAmelCase , '''conf''' , local_files_only=__lowerCAmelCase , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        _lowerCamelCase : Any = mock.Mock()
        _lowerCamelCase : Optional[Any] = 5_0_0
        _lowerCamelCase : Dict = {}
        _lowerCamelCase : List[Any] = HTTPError
        _lowerCamelCase : int = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=__lowerCAmelCase ) as mock_head:
            _lowerCamelCase : Union[str, Any] = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=__lowerCAmelCase )
            self.assertIsNone(__lowerCAmelCase )
        # This check we did call the fake head request
        mock_head.assert_called()
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """``has_file`` reports which serialization formats a repo contains."""
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """``get_file_from_repo`` returns ``None`` for missing files, raises on
        bad repo/revision, and the fetched config parses as JSON."""
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , __lowerCAmelCase )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase , revision='''ahaha''' )
        _lowerCamelCase : Dict = get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        _lowerCamelCase : Dict = json.loads(open(__lowerCAmelCase , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 7_6_8 )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """``get_file_from_repo`` also resolves files in a plain local dir."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCamelCase : Any = Path(__lowerCAmelCase ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(__lowerCAmelCase , '''a.txt''' ) , str(__lowerCAmelCase ) )
            self.assertIsNone(get_file_from_repo(__lowerCAmelCase , '''b.txt''' ) )
| 83 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __magic_name__ (_lowercase ):
    """InstructBLIP-style processor bundling a BLIP image processor, a main
    tokenizer, and an extra "Q-Former" tokenizer stored in a subfolder.

    NOTE(review): identifiers were machine-mangled — several signatures repeat
    the parameter name ``_a`` (a SyntaxError) and assignments bind the
    throwaway name ``snake_case__`` while the bodies reference the real
    upstream names (``qformer_tokenizer``, ``encoding`` …).  Code is kept
    verbatim; only documentation was added.
    """
    # processor attributes persisted by save_pretrained / from_pretrained
    __lowercase : Tuple = ["image_processor", "tokenizer"]
    # component class names used when re-instantiating the processor
    __lowercase : Dict = "BlipImageProcessor"
    __lowercase : Optional[Any] = "AutoTokenizer"
    def __init__( self:str , _a:Any , _a:Dict , _a:Tuple ):
        super().__init__(__lowerCAmelCase , __lowerCAmelCase )
        # add QFormer tokenizer
        snake_case__ = qformer_tokenizer
    def __call__( self:Optional[Any] , _a:ImageInput = None , _a:Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _a:bool = True , _a:Union[bool, str, PaddingStrategy] = False , _a:Union[bool, str, TruncationStrategy] = None , _a:Optional[int] = None , _a:int = 0 , _a:Optional[int] = None , _a:Optional[bool] = None , _a:bool = False , _a:bool = False , _a:bool = False , _a:bool = False , _a:bool = False , _a:bool = True , _a:Optional[Union[str, TensorType]] = None , **_a:List[Any] , ):
        """Tokenize ``text`` with both tokenizers and preprocess ``images``;
        the Q-Former ids/mask are re-keyed with a ``qformer_`` prefix and
        everything is merged into one ``BatchFeature``."""
        if images is None and text is None:
            raise ValueError('''You have to specify at least images or text.''' )
        snake_case__ = BatchFeature()
        if text is not None:
            snake_case__ = self.tokenizer(
                text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
            encoding.update(__lowerCAmelCase )
            snake_case__ = self.qformer_tokenizer(
                text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
            snake_case__ = qformer_text_encoding.pop('''input_ids''' )
            snake_case__ = qformer_text_encoding.pop('''attention_mask''' )
        if images is not None:
            snake_case__ = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase )
            encoding.update(__lowerCAmelCase )
        return encoding
    def SCREAMING_SNAKE_CASE__ ( self:List[str] , *_a:str , **_a:Dict ):
        """Forward batch decoding to the main tokenizer."""
        return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self:List[str] , *_a:List[str] , **_a:List[str] ):
        """Forward single-sequence decoding to the main tokenizer."""
        return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        """Union of tokenizer and image-processor input names (order kept)."""
        snake_case__ = self.tokenizer.model_input_names
        snake_case__ = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def SCREAMING_SNAKE_CASE__ ( self:int , _a:Dict , **_a:Tuple ):
        """Save the Q-Former tokenizer into a ``qformer_tokenizer`` subfolder,
        then delegate the rest to the base ``ProcessorMixin`` save."""
        if os.path.isfile(__lowerCAmelCase ):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
        snake_case__ = os.path.join(__lowerCAmelCase , '''qformer_tokenizer''' )
        self.qformer_tokenizer.save_pretrained(__lowerCAmelCase )
        return super().save_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls:List[Any] , _a:Optional[int] , **_a:Any ):
        """Load the Q-Former tokenizer from its subfolder and append it to the
        base-class constructor arguments."""
        snake_case__ = AutoTokenizer.from_pretrained(__lowerCAmelCase , subfolder='''qformer_tokenizer''' )
        snake_case__ = cls._get_arguments_from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
        args.append(__lowerCAmelCase )
        return cls(*__lowerCAmelCase )
| 33 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# Map of pretrained Cvt checkpoints to their hosted configuration files.
lowerCAmelCase__ = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __snake_case ( _lowercase):
    """Configuration for the CvT (Convolutional vision Transformer) model.

    Hyper-parameters are lists with one entry per stage (three stages by
    default).  Extra keyword arguments are forwarded to the base
    ``PretrainedConfig``.

    NOTE(review): the mangled original repeated one parameter name for the
    whole ``__init__`` signature (a SyntaxError) and bound throwaway locals
    instead of attributes; parameter/attribute names below were reconstructed
    from the right-hand sides of the original assignments.
    """

    snake_case__ : List[str] = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[6_4, 1_9_2, 3_8_4],
        num_heads=[1, 3, 6],
        depth=[1, 2, 1_0],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 83 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_ ( _lowercase ):
    """Unconditional DDIM image-generation pipeline (UNet + DDIM scheduler).

    NOTE(review): parameter names were machine-mangled — ``__call__`` repeats
    ``__lowerCAmelCase`` for every argument (a SyntaxError) while the body
    uses the upstream names (``batch_size``, ``generator``, ``eta`` …).
    Code is kept verbatim; only documentation was added.
    """
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
        """Register the UNet and a scheduler coerced to DDIM."""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )
    @torch.no_grad()
    def __call__( self , __lowerCAmelCase = 1 , __lowerCAmelCase = None , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 50 , __lowerCAmelCase = None , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , ):
        """Sample random noise and run the full DDIM denoising loop; returns
        generated images (PIL or array) or an ``ImagePipelineOutput``."""
        if isinstance(self.unet.config.sample_size , __lowerCAmelCase ):
            SCREAMING_SNAKE_CASE_ : Tuple = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            SCREAMING_SNAKE_CASE_ : List[Any] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
            raise ValueError(
                F'You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch'
                F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
        SCREAMING_SNAKE_CASE_ : Tuple = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(__lowerCAmelCase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            SCREAMING_SNAKE_CASE_ : Tuple = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.scheduler.step(
                __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , eta=__lowerCAmelCase , use_clipped_model_output=__lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
        # map from [-1, 1] to [0, 1] and move channels last for image output
        SCREAMING_SNAKE_CASE_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
        SCREAMING_SNAKE_CASE_ : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ : Tuple = self.numpy_to_pil(__lowerCAmelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__lowerCAmelCase )
| 345 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __snake_case ( _lowercase):
    # Output of a VE-SDE predictor step.
    # NOTE(review): both fields were mangled to the same name `snake_case__`,
    # so the second annotation shadows the first; upstream `SdeVeOutput`
    # exposes distinct `prev_sample` / `prev_sample_mean` fields — confirm.
    snake_case__ : torch.FloatTensor
    snake_case__ : torch.FloatTensor
class __snake_case ( _lowercase , _lowercase):
    """Variance-exploding (VE) SDE scheduler (Song et al., score_sde), with a
    predictor step (``step_pred``) and a corrector step (``step_correct``).

    NOTE(review): identifiers were machine-mangled — ``__init__`` and other
    signatures repeat ``__lowerCAmelCase`` (a SyntaxError) while bodies use the
    upstream names (``sigma_max``, ``timesteps`` …), and many results bind the
    throwaway ``_lowerCamelCase``.  Code kept verbatim; documentation only.
    """
    snake_case__ : int = 1
    @register_to_config
    def __init__( self : str , __lowerCAmelCase : int = 2_0_0_0 , __lowerCAmelCase : float = 0.15 , __lowerCAmelCase : float = 0.01 , __lowerCAmelCase : float = 13_48.0 , __lowerCAmelCase : float = 1E-5 , __lowerCAmelCase : int = 1 , ):
        """Store ``sigma_max`` and initialize the sigma/timestep schedule."""
        _lowerCamelCase : Optional[int] = sigma_max
        # setable values
        _lowerCamelCase : Dict = None
        self.set_sigmas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ):
        """Identity — this scheduler requires no model-input scaling."""
        return sample
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : Union[str, torch.device] = None ):
        """Set continuous timesteps linearly spaced from 1 to sampling_eps."""
        _lowerCamelCase : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        _lowerCamelCase : Optional[int] = torch.linspace(1 , __lowerCAmelCase , __lowerCAmelCase , device=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None ):
        """Set the geometric sigma schedule (and timesteps if unset)."""
        _lowerCamelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
        _lowerCamelCase : int = sigma_max if sigma_max is not None else self.config.sigma_max
        _lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        _lowerCamelCase : Optional[int] = torch.exp(torch.linspace(math.log(__lowerCAmelCase ) , math.log(__lowerCAmelCase ) , __lowerCAmelCase ) )
        _lowerCamelCase : Tuple = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ):
        """Sigma of the previous timestep (zeros at the first timestep)."""
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
        """Predictor step: ancestral-sampling update (paper eqs. 6 / 47)."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        _lowerCamelCase : Tuple = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        _lowerCamelCase : Dict = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        _lowerCamelCase : Optional[int] = timesteps.to(self.discrete_sigmas.device )
        _lowerCamelCase : Any = self.discrete_sigmas[timesteps].to(sample.device )
        _lowerCamelCase : int = self.get_adjacent_sigma(__lowerCAmelCase , __lowerCAmelCase ).to(sample.device )
        _lowerCamelCase : Any = torch.zeros_like(__lowerCAmelCase )
        _lowerCamelCase : Any = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        _lowerCamelCase : Union[str, Any] = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            _lowerCamelCase : List[Any] = diffusion.unsqueeze(-1 )
        _lowerCamelCase : int = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        _lowerCamelCase : List[str] = randn_tensor(
            sample.shape , layout=sample.layout , generator=__lowerCAmelCase , device=sample.device , dtype=sample.dtype )
        _lowerCamelCase : List[Any] = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        _lowerCamelCase : int = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=__lowerCAmelCase , prev_sample_mean=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
        """Corrector step: Langevin-style update with SNR-scaled step size."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        _lowerCamelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=__lowerCAmelCase ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        _lowerCamelCase : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        _lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        _lowerCamelCase : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        _lowerCamelCase : Tuple = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        _lowerCamelCase : Union[str, Any] = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            _lowerCamelCase : str = step_size.unsqueeze(-1 )
        _lowerCamelCase : Any = sample + step_size * model_output
        _lowerCamelCase : int = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , ):
        """Add sigma-scaled noise to clean samples (forward process)."""
        _lowerCamelCase : Dict = timesteps.to(original_samples.device )
        _lowerCamelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
        _lowerCamelCase : Union[str, Any] = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        _lowerCamelCase : int = noise + original_samples
        return noisy_samples
    def __len__( self : Optional[int] ):
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
| 83 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
# Emit INFO-level logs while converting checkpoints.
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a ``UniSpeechSatForSequenceClassification`` model and copy the
    s3prl downstream head weights (projector + post-net linear) into it."""
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load a ``UniSpeechSatForAudioFrameClassification`` (diarization) model
    and copy the s3prl linear head weights into it."""
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['''model.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.linear.bias''']
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load a ``UniSpeechSatForXVector`` model and copy the projector, TDNN,
    utterance-level and objective weights from the s3prl checkpoint."""
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['''connector.weight''']
    model.projector.bias.data = downstream_dict['''connector.bias''']
    for i, _kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    model.objective.weight.data = downstream_dict['''objective.W''']
    return model


@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl UniSpeechSat checkpoint into a Hugging Face model and
    feature extractor, dispatching on the architecture in the config, and
    save both to ``model_dump_path``."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint['''Downstream''']
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    # NOTE(review): the attention-mask / normalization flags were lost in the
    # mangled source; values restored from the upstream conversion script —
    # confirm against transformers' s3prl conversion utilities.
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
    )
    parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
    parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 625 |
"""simple docstring"""
from torch import nn
def snake_case_ ( act_fn: str ):
    """Map an activation-function name to the matching ``torch.nn`` module.

    Fixes from the mangled original: the parameter was named ``A_`` while the
    body referenced ``act_fn`` (a ``NameError`` on every call), and the
    annotation said ``int`` although a string name is expected.

    Args:
        act_fn: one of ``"swish"``/``"silu"``, ``"mish"`` or ``"gelu"``.

    Returns:
        The instantiated activation module.

    Raises:
        ValueError: if ``act_fn`` is not a supported name.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'''Unsupported activation function: {act_fn}''' )
| 83 | 0 |
import heapq
def a_ ( graph: dict ) -> set:
    """Greedy APPROX-VERTEX-COVER: repeatedly pick the vertex of highest
    remaining degree, add it to the cover, and delete its incident edges.

    Fixes from the mangled original: the parameter and every intermediate
    variable had been renamed to throwaway identifiers while the body still
    referenced the real names (``graph``, ``chosen_vertices``, ``argmax`` …),
    so any call raised ``NameError``.

    Args:
        graph: adjacency mapping ``vertex -> list of neighbours``.
            NOTE: the neighbour lists are mutated in place.

    Returns:
        The set of vertices chosen for the cover.

    >>> a_({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]})
    {0, 1, 2, 4}
    """
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    queue: list[list] = []
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(F"""Minimum vertex cover:\n{a_(graph)}""")
| 464 |
"""simple docstring"""
def snake_case_ ( input_a: int, input_b: int ) -> int:
    """NOR gate: return 1 when both inputs are 0, else 0.

    Fixes from the mangled original: both parameters were named ``A_``
    (a SyntaxError) and the expression compared ``input_a`` with itself.

    >>> snake_case_(0, 0)
    1
    >>> snake_case_(1, 0)
    0
    """
    return int(input_a == input_b == 0 )


def main() -> None:
    """Print the truth table of the NOR gate.

    The mangled original defined this function with the same name as the
    gate (shadowing it) and the f-strings called an undefined ``nor_gate``;
    the ``__main__`` guard called an undefined ``main()``.
    """
    print('''Truth Table of NOR Gate:''' )
    print('''| Input 1 | Input 2 | Output |''' )
    print(F'''| 0 | 0 | {snake_case_(0, 0 )} |''' )
    print(F'''| 0 | 1 | {snake_case_(0, 1 )} |''' )
    print(F'''| 1 | 0 | {snake_case_(1, 0 )} |''' )
    print(F'''| 1 | 1 | {snake_case_(1, 1 )} |''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 83 | 0 |
'''Lazy import structure for the WavLM model: the configuration is always
importable, the modeling classes only when torch is available.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> list of public names it exports.
# Fixes from the mangled original: the dict was assigned to `__A` while the
# final _LazyModule call referenced the undefined name `_import_structure`
# (NameError at import time), the dict was then overwritten by a bare list,
# and the _LazyModule instance was discarded instead of replacing the module.
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: only the configuration is exposed.
    pass
else:
    _import_structure['modeling_wavlm'] = [
        'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'WavLMForAudioFrameClassification',
        'WavLMForCTC',
        'WavLMForSequenceClassification',
        'WavLMForXVector',
        'WavLMModel',
        'WavLMPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( matrix: list[list[int]] ) -> int:
    """Minimum path sum from top-left to bottom-right of a grid, moving only
    right or down.

    Fixes from the mangled original: the parameter was named ``A_`` while the
    body referenced ``matrix`` (a ``NameError`` on every call).

    Args:
        matrix: rectangular, non-empty grid of costs.
            NOTE: the grid is overwritten in place with running path sums.

    Returns:
        The minimum total cost of a top-left to bottom-right path.

    >>> snake_case_([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix ) ):
        for j in range(1, len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1] )
    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 83 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase ( _lowercase):
    """Output of a VE-SDE scheduler step (mangled field names kept verbatim)."""
    # NOTE(review): both class attributes were mangled to `lowerCAmelCase_ = 42`,
    # so the second assignment shadows the first; upstream these are distinct
    # `prev_sample` / `prev_sample_mean` FloatTensor fields — confirm upstream.
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
class UpperCAmelCase ( _lowercase , _lowercase):
"""simple docstring"""
lowerCAmelCase_ = 1
@register_to_config
def __init__( self : str , UpperCamelCase__ : int = 2000 , UpperCamelCase__ : float = 0.15 , UpperCamelCase__ : float = 0.01 , UpperCamelCase__ : float = 1348.0 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : int = 1 , ) -> Optional[Any]:
_UpperCamelCase =sigma_max
# setable values
_UpperCamelCase =None
self.set_sigmas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def UpperCamelCase__ ( self : Any , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ) -> Union[str, Any]:
return sample
def UpperCamelCase__ ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : float = None , UpperCamelCase__ : Union[str, torch.device] = None ) -> Tuple:
_UpperCamelCase =sampling_eps if sampling_eps is not None else self.config.sampling_eps
_UpperCamelCase =torch.linspace(1 , __lowerCAmelCase , __lowerCAmelCase , device=__lowerCAmelCase )
def UpperCamelCase__ ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : float = None , UpperCamelCase__ : float = None , UpperCamelCase__ : float = None ) -> Tuple:
_UpperCamelCase =sigma_min if sigma_min is not None else self.config.sigma_min
_UpperCamelCase =sigma_max if sigma_max is not None else self.config.sigma_max
_UpperCamelCase =sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase )
_UpperCamelCase =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_UpperCamelCase =torch.exp(torch.linspace(math.log(__lowerCAmelCase ) , math.log(__lowerCAmelCase ) , __lowerCAmelCase ) )
_UpperCamelCase =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def UpperCamelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def UpperCamelCase__ ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.Generator] = None , UpperCamelCase__ : bool = True , ) -> Tuple:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
_UpperCamelCase =timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_UpperCamelCase =(timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_UpperCamelCase =timesteps.to(self.discrete_sigmas.device )
_UpperCamelCase =self.discrete_sigmas[timesteps].to(sample.device )
_UpperCamelCase =self.get_adjacent_sigma(__lowerCAmelCase , __lowerCAmelCase ).to(sample.device )
_UpperCamelCase =torch.zeros_like(__lowerCAmelCase )
_UpperCamelCase =(sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_UpperCamelCase =diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_UpperCamelCase =diffusion.unsqueeze(-1 )
_UpperCamelCase =drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_UpperCamelCase =randn_tensor(
sample.shape , layout=sample.layout , generator=__lowerCAmelCase , device=sample.device , dtype=sample.dtype )
_UpperCamelCase =sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_UpperCamelCase =prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__lowerCAmelCase , prev_sample_mean=__lowerCAmelCase )
def UpperCamelCase__ ( self : List[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.Generator] = None , UpperCamelCase__ : bool = True , ) -> str:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_UpperCamelCase =randn_tensor(sample.shape , layout=sample.layout , generator=__lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_UpperCamelCase =torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
_UpperCamelCase =torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
_UpperCamelCase =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
_UpperCamelCase =step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_UpperCamelCase =step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_UpperCamelCase =step_size.unsqueeze(-1 )
_UpperCamelCase =sample + step_size * model_output
_UpperCamelCase =prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCAmelCase )
def UpperCamelCase__ ( self : Any , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , ) -> str:
_UpperCamelCase =timesteps.to(original_samples.device )
_UpperCamelCase =self.discrete_sigmas.to(original_samples.device )[timesteps]
_UpperCamelCase =(
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None]
)
_UpperCamelCase =noise + original_samples
return noisy_samples
    def __len__( self : Optional[int] ) -> int:
        # The scheduler's "length" is the configured number of training timesteps.
        return self.config.num_train_timesteps
| 404 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    """One element of a singly linked list: a payload plus a link to the next node."""

    def __init__(self, data: T):
        self.data = data  # stored value
        self.next: Node[T] | None = None  # next node in the chain (None = end of list)

    def __str__(self) -> str:
        return f"{self.data}"
class Stack(Generic[T]):
    """LIFO stack backed by a singly linked list of Node objects.

    Method names restored from the calls/error messages in the original body
    (`self.is_empty()`, "pop from empty stack", "peek from empty stack").
    """

    def __init__(self) -> None:
        self.top: Node[T] | None = None  # most recently pushed node

    def __iter__(self) -> Iterator[T]:
        # Walk from the top of the stack to the bottom.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        # e.g. "3->2->1" with 3 on top.
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        """Push `item` onto the stack (O(1))."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top item; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('''pop from empty stack''')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the top item without removing it; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('''peek from empty stack''')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every item (the unlinked nodes are garbage-collected)."""
        self.top = None
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 83 | 0 |
# First cell injected into auto-generated doc notebooks: installs the
# packages the notebook needs.
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# Cells prepended to every generated notebook.
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Placeholder substitutions that the `black` formatter should not touch.
black_avoid_patterns = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 122 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    '''transformers''',
    os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# (raw string: the original non-raw literal relied on invalid escape sequences)
_re_checkpoint = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')

# Config classes exempted from the docstring-checkpoint check.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    '''CLIPConfigMixin''',
    '''DecisionTransformerConfigMixin''',
    '''EncoderDecoderConfigMixin''',
    '''RagConfigMixin''',
    '''SpeechEncoderDecoderConfigMixin''',
    '''VisionEncoderDecoderConfigMixin''',
    '''VisionTextDualEncoderConfigMixin''',
}
def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every config class whose docstring does not
    contain a valid `[name](https://huggingface.co/name)` checkpoint link.

    Name restored from the `__main__` call below; mangled locals restored
    from the identifiers the body reads.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint))
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''')


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 83 | 0 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
# NOTE(review): the assignment targets of the next three string flags were
# lost in this copy; upstream they most likely configure ONNX Runtime
# TensorRT environment variables (e.g. os.environ["ORT_TENSORRT_INT8_ENABLE"])
# -- confirm against the original script before relying on them.
_ort_flag_a = """1"""
_ort_flag_b = """0"""
_ort_flag_c = """1"""

sess_opt = ort.SessionOptions()
# Disable all graph optimizations so the raw exported graph is benchmarked.
# NOTE(review): the attribute target was lost in this copy; setting
# `graph_optimization_level` is the restored intent -- confirm upstream.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
execution_provider = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
sess = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
# BERT-style integer inputs; np.int64 replaces the nonexistent `np.intaa`.
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("""Warm up phase...""")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("""Start inference...""")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters))
| 259 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
# NOTE(review): this flag's name is mangled and nothing in this file reads it;
# upstream diffusers test modules set `torch.backends.cuda.matmul.allow_tf32 = False`
# at this spot -- confirm the intended assignment target.
lowerCAmelCase__ = False
# Intentionally empty placeholder for the fast (CPU) pipeline tests; the real
# coverage is in the @nightly GPU test class defined below.
# NOTE(review): the class name is mangled in this copy.
class __snake_case ( unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests for VersatileDiffusionTextToImagePipeline.

    NOTE(review): the class/method names and all locals were mangled in this
    copy; they are restored from the values the bodies read.
    """

    def tearDown(self):
        # Release VRAM between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        # Pipeline without the text unet must round-trip save/load unchanged.
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = '''A painting of a squirrel eating a burger '''
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy'''
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy'''
        ).images

        assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        # fp16 inference must reproduce the recorded reference slice.
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''', torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = '''A painting of a squirrel eating a burger '''
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='''numpy'''
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 83 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """
    Combines a Pix2StructImageProcessor and a T5 tokenizer into one processor.

    NOTE(review): the class name, ProcessorMixin class attributes, duplicate
    parameter names (a SyntaxError) and mangled locals were restored from the
    identifiers the bodies read.
    """

    # Attribute names that ProcessorMixin wires to the two components.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # Pix2Struct does not use token_type_ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare image(s) and/or text for the model.

        Text-only input returns a plain tokenizer encoding; otherwise the
        image encoding is returned, with the tokenized text attached under
        decoder_* keys.
        """
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            # Rename to decoder_* so the encodings can be merged into one dict.
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop('attention_mask')
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop('input_ids')
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 439 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
# NOTE(review): the assignment targets of the next three string flags were
# lost in this copy; upstream they most likely configure ONNX Runtime
# TensorRT environment variables (e.g. os.environ["ORT_TENSORRT_INT8_ENABLE"])
# -- confirm against the original script before relying on them.
_ort_flag_a = '''1'''
_ort_flag_b = '''0'''
_ort_flag_c = '''1'''

sess_opt = ort.SessionOptions()
# Disable all graph optimizations so the raw exported graph is benchmarked.
# NOTE(review): the attribute target was lost in this copy; setting
# `graph_optimization_level` is the restored intent -- confirm upstream.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
execution_provider = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
sess = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
# BERT-style integer inputs; np.int64 replaces the nonexistent `np.intaa`.
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print('''Warm up phase...''')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print('''Start inference...''')
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """
    Bezier curve defined by a list of 2D control points; n + 1 control points
    yield a curve of degree n.  (Class name restored from the `__main__` demo
    below; method names restored from the internal calls.)
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the Bernstein basis values for every control point at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the (x, y) point on the curve at time t in [0, 1]."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve and its control points (opens a matplotlib window)."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
    # Self-check doctests, then render demo curves of degree 1-3 (each
    # plot_curve() call opens a blocking matplotlib window).
    import doctest

    doctest.testmod()
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 340 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative divergence time (0..1) of point (x, y) under the
    Mandelbrot iteration z -> z**2 + c, with c = x + iy.

    1.0 means the point never diverged within max_step iterations (treated
    as inside the set).  Name restored from the call in get_image below.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """
    Black-and-white coloring: points inside the set (distance == 1) are
    black, all other points white.  Name restored from the call in get_image.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """
    Color-coded coloring: points inside the set (distance == 1) are black,
    other points get an HSV hue proportional to the distance.  Name restored
    from the call in get_image.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """
    Render the Mandelbrot set as a PIL image.

    NOTE(review): parameter names and the pixel-write target were mangled in
    this copy; they are restored from the values the body reads.
    """
    img = Image.new('''RGB''', (image_width, image_height))
    pixels = img.load()

    # Invariant across pixels: figure height keeps the image aspect ratio.
    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # (the render target `img` was mangled; restored so img.show() works)
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 83 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.